llvm.org GIT mirror llvm / 9ebcde1
GlobalISel: Combine unmerge of merge with intermediate cast This eliminates some illegal intermediate vectors when operations are scalarized. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@365566 91177308-0d34-0410-b5e6-96231b3b80d8 Matt Arsenault 4 months ago
4 changed file(s) with 550 addition(s) and 15 deletion(s). Raw diff Collapse all Expand all
2626 MachineIRBuilder &Builder;
2727 MachineRegisterInfo &MRI;
2828 const LegalizerInfo &LI;
29
30 static bool isArtifactCast(unsigned Opc) {
31 switch (Opc) {
32 case TargetOpcode::G_TRUNC:
33 case TargetOpcode::G_SEXT:
34 case TargetOpcode::G_ZEXT:
35 case TargetOpcode::G_ANYEXT:
36 return true;
37 default:
38 return false;
39 }
40 }
2941
3042 public:
3143 LegalizationArtifactCombiner(MachineIRBuilder &B, MachineRegisterInfo &MRI,
207219 return false;
208220
209221 unsigned NumDefs = MI.getNumOperands() - 1;
222 MachineInstr *SrcDef =
223 getDefIgnoringCopies(MI.getOperand(NumDefs).getReg(), MRI);
224 if (!SrcDef)
225 return false;
210226
211227 LLT OpTy = MRI.getType(MI.getOperand(NumDefs).getReg());
212228 LLT DestTy = MRI.getType(MI.getOperand(0).getReg());
213
229 MachineInstr *MergeI = SrcDef;
230 unsigned ConvertOp = 0;
231
232 // Handle intermediate conversions
233 unsigned SrcOp = SrcDef->getOpcode();
234 if (isArtifactCast(SrcOp)) {
235 ConvertOp = SrcOp;
236 MergeI = getDefIgnoringCopies(SrcDef->getOperand(1).getReg(), MRI);
237 }
238
239 // FIXME: Handle scalarizing concat_vectors (scalar result type with vector
240 // source)
214241 unsigned MergingOpcode = getMergeOpcode(OpTy, DestTy);
215 MachineInstr *MergeI =
216 getOpcodeDef(MergingOpcode, MI.getOperand(NumDefs).getReg(), MRI);
217
218 if (!MergeI)
242 if (!MergeI || MergeI->getOpcode() != MergingOpcode)
219243 return false;
220244
221245 const unsigned NumMergeRegs = MergeI->getNumOperands() - 1;
222246
223247 if (NumMergeRegs < NumDefs) {
224 if (NumDefs % NumMergeRegs != 0)
248 if (ConvertOp != 0 || NumDefs % NumMergeRegs != 0)
225249 return false;
226250
227251 Builder.setInstr(MI);
243267 }
244268
245269 } else if (NumMergeRegs > NumDefs) {
246 if (NumMergeRegs % NumDefs != 0)
270 if (ConvertOp != 0 || NumMergeRegs % NumDefs != 0)
247271 return false;
248272
249273 Builder.setInstr(MI);
265289 }
266290
267291 } else {
292 LLT MergeSrcTy = MRI.getType(MergeI->getOperand(1).getReg());
293 if (ConvertOp) {
294 Builder.setInstr(MI);
295
296 for (unsigned Idx = 0; Idx < NumDefs; ++Idx) {
297 Register MergeSrc = MergeI->getOperand(Idx + 1).getReg();
298 Builder.buildInstr(ConvertOp, {MI.getOperand(Idx).getReg()},
299 {MergeSrc});
300 }
301
302 markInstAndDefDead(MI, *MergeI, DeadInsts);
303 return true;
304 }
268305 // FIXME: is a COPY appropriate if the types mismatch? We know both
269306 // registers are allocatable by now.
270 if (MRI.getType(MI.getOperand(0).getReg()) !=
271 MRI.getType(MergeI->getOperand(1).getReg()))
307 if (DestTy != MergeSrcTy)
272308 return false;
273309
274310 for (unsigned Idx = 0; Idx < NumDefs; ++Idx)
416452 MachineInstr *TmpDef = MRI.getVRegDef(PrevRegSrc);
417453 if (MRI.hasOneUse(PrevRegSrc)) {
418454 if (TmpDef != &DefMI) {
419 assert(TmpDef->getOpcode() == TargetOpcode::COPY &&
420 "Expecting copy here");
455 assert(TmpDef->getOpcode() == TargetOpcode::COPY ||
456 isArtifactCast(TmpDef->getOpcode()) &&
457 "Expecting copy or artifact cast here");
458
421459 DeadInsts.push_back(TmpDef);
422460 }
423461 } else
1414 #define LLVM_CODEGEN_GLOBALISEL_UTILS_H
1515
1616 #include "llvm/ADT/StringRef.h"
17 #include "llvm/CodeGen/Register.h"
1718
1819 namespace llvm {
1920
129130 /// See if Reg is defined by an single def instruction that is
130131 /// Opcode. Also try to do trivial folding if it's a COPY with
131132 /// same types. Returns null otherwise.
132 MachineInstr *getOpcodeDef(unsigned Opcode, unsigned Reg,
133 MachineInstr *getOpcodeDef(unsigned Opcode, Register Reg,
133134 const MachineRegisterInfo &MRI);
135
136 /// Find the def instruction for \p Reg, folding away any trivial copies. Note
137 /// it may still return a COPY, if it changes the type. May return nullptr if \p
138 /// Reg is not a generic virtual register.
139 MachineInstr *getDefIgnoringCopies(Register Reg,
140 const MachineRegisterInfo &MRI);
134141
135142 /// Returns an APFloat from Val converted to the appropriate size.
136143 APFloat getAPFloatFromSize(double Val, unsigned Size);
280280 return MI->getOperand(1).getFPImm();
281281 }
282282
283 llvm::MachineInstr *llvm::getOpcodeDef(unsigned Opcode, unsigned Reg,
284 const MachineRegisterInfo &MRI) {
283 llvm::MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
284 const MachineRegisterInfo &MRI) {
285285 auto *DefMI = MRI.getVRegDef(Reg);
286286 auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
287287 if (!DstTy.isValid())
293293 break;
294294 DefMI = MRI.getVRegDef(SrcReg);
295295 }
296 return DefMI->getOpcode() == Opcode ? DefMI : nullptr;
296 return DefMI;
297 }
298
299 llvm::MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
300 const MachineRegisterInfo &MRI) {
301 MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
302 return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
297303 }
298304
299305 APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
0 # NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
1 # RUN: llc -O0 -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -run-pass=legalizer -global-isel-abort=0 -o - %s | FileCheck %s
2
3 ---
4 name: test_unmerge_values_s1_trunc_v2s1_of_build_vector_v2s32
5 body: |
6 bb.0:
7 ; CHECK-LABEL: name: test_unmerge_values_s1_trunc_v2s1_of_build_vector_v2s32
8 ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
9 ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
10 ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
11 ; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
12 ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s32), [[UV2]]
13 ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s32), [[UV3]]
14 ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1)
15 ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP1]](s1)
16 ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
17 ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
18 ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
19 ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
20 ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ANYEXT1]](s32)
21 ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
22 ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
23 ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32)
24 ; CHECK: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
25 %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
26 %1:_(<2 x s32>) = COPY $vgpr0_vgpr1
27 %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %0(<2 x s32>)
28 %4:_(s32), %5:_(s32) = G_UNMERGE_VALUES %1(<2 x s32>)
29 %6:_(s1) = G_ICMP intpred(ne), %2(s32), %4
30 %7:_(s1) = G_ICMP intpred(ne), %3(s32), %5
31 %8:_(s32) = G_ANYEXT %6(s1)
32 %9:_(s32) = G_ANYEXT %7(s1)
33 %10:_(<2 x s32>) = G_BUILD_VECTOR %8, %9
34 %11:_(<2 x s1>) = G_TRUNC %10(<2 x s32>)
35 %12:_(s1), %13:_(s1) = G_UNMERGE_VALUES %11
36 %14:_(s32) = G_SEXT %12
37 %15:_(s32) = G_SEXT %13
38 %16:_(<2 x s32>) = G_BUILD_VECTOR %14, %15
39 $vgpr0_vgpr1 = COPY %16
40
41 ...
42
43 # Requires looking through extra copies between the build_vector,
44 # trunc and unmerge.
45 ---
46 name: test_unmerge_values_s1_trunc_v2s1_of_build_vector_v2s32_extra_copies
47 body: |
48 bb.0:
49 ; CHECK-LABEL: name: test_unmerge_values_s1_trunc_v2s1_of_build_vector_v2s32_extra_copies
50 ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
51 ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
52 ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
53 ; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
54 ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s32), [[UV2]]
55 ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s32), [[UV3]]
56 ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1)
57 ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP1]](s1)
58 ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
59 ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ANYEXT]](s32)
60 ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
61 ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
62 ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[ANYEXT1]](s32)
63 ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
64 ; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SHL1]], [[C]](s32)
65 ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ASHR]](s32), [[ASHR1]](s32)
66 ; CHECK: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
67 %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
68 %1:_(<2 x s32>) = COPY $vgpr0_vgpr1
69 %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %0(<2 x s32>)
70 %4:_(s32), %5:_(s32) = G_UNMERGE_VALUES %1(<2 x s32>)
71 %6:_(s1) = G_ICMP intpred(ne), %2(s32), %4
72 %7:_(s1) = G_ICMP intpred(ne), %3(s32), %5
73 %8:_(s32) = G_ANYEXT %6(s1)
74 %9:_(s32) = G_ANYEXT %7(s1)
75 %10:_(<2 x s32>) = G_BUILD_VECTOR %8, %9
76 %11:_(<2 x s32>) = COPY %10
77 %12:_(<2 x s1>) = G_TRUNC %11(<2 x s32>)
78 %13:_(<2 x s1>) = COPY %12
79 %14:_(s1), %15:_(s1) = G_UNMERGE_VALUES %13
80 %16:_(s32) = G_SEXT %14
81 %17:_(s32) = G_SEXT %15
82 %18:_(<2 x s32>) = G_BUILD_VECTOR %16, %17
83 $vgpr0_vgpr1 = COPY %18
84
85 ...
86
87 ---
88 name: test_unmerge_values_s32_sext_v2s32_of_build_vector_v2s16
89 body: |
90 bb.0:
91 ; CHECK-LABEL: name: test_unmerge_values_s32_sext_v2s32_of_build_vector_v2s16
92 ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
93 ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
94 ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
95 ; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
96 ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s32), [[UV2]]
97 ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s32), [[UV3]]
98 ; CHECK: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[ICMP]](s1)
99 ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[ICMP1]](s1)
100 ; CHECK: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[ANYEXT]](s16)
101 ; CHECK: [[SEXT1:%[0-9]+]]:_(s32) = G_SEXT [[ANYEXT1]](s16)
102 ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SEXT]](s32), [[SEXT1]](s32)
103 ; CHECK: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
104 %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
105 %1:_(<2 x s32>) = COPY $vgpr0_vgpr1
106 %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %0(<2 x s32>)
107 %4:_(s32), %5:_(s32) = G_UNMERGE_VALUES %1(<2 x s32>)
108 %6:_(s1) = G_ICMP intpred(ne), %2(s32), %4
109 %7:_(s1) = G_ICMP intpred(ne), %3(s32), %5
110 %8:_(s16) = G_ANYEXT %6
111 %9:_(s16) = G_ANYEXT %7
112 %10:_(<2 x s16>) = G_BUILD_VECTOR %8, %9
113 %11:_(<2 x s32>) = G_SEXT %10
114 %12:_(s32), %13:_(s32) = G_UNMERGE_VALUES %11
115 %14:_(<2 x s32>) = G_BUILD_VECTOR %12, %13
116 $vgpr0_vgpr1 = COPY %14
117
118 ...
119
120 ---
121 name: test_unmerge_values_s32_zext_v2s32_of_build_vector_v2s16
122 body: |
123 bb.0:
124 ; CHECK-LABEL: name: test_unmerge_values_s32_zext_v2s32_of_build_vector_v2s16
125 ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
126 ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
127 ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
128 ; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
129 ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s32), [[UV2]]
130 ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s32), [[UV3]]
131 ; CHECK: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[ICMP]](s1)
132 ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[ICMP1]](s1)
133 ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ANYEXT]](s16)
134 ; CHECK: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[ANYEXT1]](s16)
135 ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ZEXT]](s32), [[ZEXT1]](s32)
136 ; CHECK: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
137 %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
138 %1:_(<2 x s32>) = COPY $vgpr0_vgpr1
139 %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %0(<2 x s32>)
140 %4:_(s32), %5:_(s32) = G_UNMERGE_VALUES %1(<2 x s32>)
141 %6:_(s1) = G_ICMP intpred(ne), %2(s32), %4
142 %7:_(s1) = G_ICMP intpred(ne), %3(s32), %5
143 %8:_(s16) = G_ANYEXT %6(s1)
144 %9:_(s16) = G_ANYEXT %7(s1)
145 %10:_(<2 x s16>) = G_BUILD_VECTOR %8, %9
146 %11:_(<2 x s32>) = G_ZEXT %10
147 %12:_(s32), %13:_(s32) = G_UNMERGE_VALUES %11
148 %14:_(<2 x s32>) = G_BUILD_VECTOR %12(s32), %13(s32)
149 $vgpr0_vgpr1 = COPY %14(<2 x s32>)
150
151 ...
152
153 ---
154 name: test_unmerge_values_s32_anyext_v2s32_of_build_vector_v2s16
155 body: |
156 bb.0:
157 ; CHECK-LABEL: name: test_unmerge_values_s32_anyext_v2s32_of_build_vector_v2s16
158 ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
159 ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
160 ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
161 ; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
162 ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s32), [[UV2]]
163 ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s32), [[UV3]]
164 ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP]](s1)
165 ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ICMP1]](s1)
166 ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ANYEXT]](s32), [[ANYEXT1]](s32)
167 ; CHECK: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
168 %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
169 %1:_(<2 x s32>) = COPY $vgpr0_vgpr1
170 %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %0(<2 x s32>)
171 %4:_(s32), %5:_(s32) = G_UNMERGE_VALUES %1(<2 x s32>)
172 %6:_(s1) = G_ICMP intpred(ne), %2(s32), %4
173 %7:_(s1) = G_ICMP intpred(ne), %3(s32), %5
174 %8:_(s16) = G_ANYEXT %6(s1)
175 %9:_(s16) = G_ANYEXT %7(s1)
176 %10:_(<2 x s16>) = G_BUILD_VECTOR %8, %9
177 %11:_(<2 x s32>) = G_ANYEXT %10
178 %12:_(s32), %13:_(s32) = G_UNMERGE_VALUES %11
179 %14:_(<2 x s32>) = G_BUILD_VECTOR %12, %13
180 $vgpr0_vgpr1 = COPY %14
181
182 ...
183
184 ---
185 name: test_unmerge_values_v2s16_zext_v4s32_of_build_vector_v4s16
186
187 body: |
188 bb.0:
189 ; CHECK-LABEL: name: test_unmerge_values_v2s16_zext_v4s32_of_build_vector_v4s16
190 ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
191 ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
192 ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
193 ; CHECK: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
194 ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV]](s32), [[UV2]]
195 ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[UV1]](s32), [[UV3]]
196 ; CHECK: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[ICMP]](s1)
197 ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[ICMP1]](s1)
198 ; CHECK: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ANYEXT]](s16)
199 ; CHECK: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[ANYEXT1]](s16)
200 ; CHECK: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[ANYEXT]](s16)
201 ; CHECK: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[ANYEXT1]](s16)
202 ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[ZEXT]](s32), [[ZEXT1]](s32), [[ZEXT2]](s32), [[ZEXT3]](s32)
203 ; CHECK: [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>), [[UV6:%[0-9]+]]:_(<2 x s16>), [[UV7:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<4 x s32>)
204 ; CHECK: S_ENDPGM 0, implicit [[UV4]](<2 x s16>), implicit [[UV5]](<2 x s16>), implicit [[UV6]](<2 x s16>), implicit [[UV7]](<2 x s16>)
205 %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
206 %1:_(<2 x s32>) = COPY $vgpr0_vgpr1
207 %2:_(s32), %3:_(s32) = G_UNMERGE_VALUES %0(<2 x s32>)
208 %4:_(s32), %5:_(s32) = G_UNMERGE_VALUES %1(<2 x s32>)
209 %6:_(s1) = G_ICMP intpred(ne), %2(s32), %4
210 %7:_(s1) = G_ICMP intpred(ne), %3(s32), %5
211 %8:_(s16) = G_ANYEXT %6
212 %9:_(s16) = G_ANYEXT %7
213 %10:_(<4 x s16>) = G_BUILD_VECTOR %8, %9, %8, %9
214 %11:_(<4 x s32>) = G_ZEXT %10
215 %12:_(<2 x s16>), %13:_(<2 x s16>), %14:_(<2 x s16>), %15:_(<2 x s16>) = G_UNMERGE_VALUES %11
216 S_ENDPGM 0, implicit %12, implicit %13, implicit %14, implicit %15
217
218 ...
219
220 ---
221 name: test_unmerge_values_s1_trunc_v4s1_of_concat_vectors_v4s32_v2s32
222 body: |
223 bb.0:
224 ; CHECK-LABEL: name: test_unmerge_values_s1_trunc_v4s1_of_concat_vectors_v4s32_v2s32
225 ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
226 ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
227 ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[COPY]](<2 x s32>), [[COPY1]](<2 x s32>)
228 ; CHECK: [[TRUNC:%[0-9]+]]:_(<4 x s1>) = G_TRUNC [[CONCAT_VECTORS]](<4 x s32>)
229 ; CHECK: [[UV:%[0-9]+]]:_(s1), [[UV1:%[0-9]+]]:_(s1), [[UV2:%[0-9]+]]:_(s1), [[UV3:%[0-9]+]]:_(s1) = G_UNMERGE_VALUES [[TRUNC]](<4 x s1>)
230 ; CHECK: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[UV]](s1)
231 ; CHECK: [[SEXT1:%[0-9]+]]:_(s32) = G_SEXT [[UV1]](s1)
232 ; CHECK: [[SEXT2:%[0-9]+]]:_(s32) = G_SEXT [[UV2]](s1)
233 ; CHECK: [[SEXT3:%[0-9]+]]:_(s32) = G_SEXT [[UV3]](s1)
234 ; CHECK: $vgpr0 = COPY [[SEXT]](s32)
235 ; CHECK: $vgpr1 = COPY [[SEXT1]](s32)
236 ; CHECK: $vgpr2 = COPY [[SEXT2]](s32)
237 ; CHECK: $vgpr3 = COPY [[SEXT3]](s32)
238 %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
239 %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
240 %2:_(<4 x s32>) = G_CONCAT_VECTORS %0, %1
241 %3:_(<4 x s1>) = G_TRUNC %2
242 %4:_(s1), %5:_(s1), %6:_(s1), %7:_(s1) = G_UNMERGE_VALUES %3
243 %8:_(s32) = G_SEXT %4
244 %9:_(s32) = G_SEXT %5
245 %10:_(s32) = G_SEXT %6
246 %11:_(s32) = G_SEXT %7
247 $vgpr0 = COPY %8
248 $vgpr1 = COPY %9
249 $vgpr2 = COPY %10
250 $vgpr3 = COPY %11
251 ...
252
253 ---
254 name: test_unmerge_values_s16_of_concat_vectors_v2s16_v2s16
255 body: |
256 bb.0:
257 ; CHECK-LABEL: name: test_unmerge_values_s16_of_concat_vectors_v2s16_v2s16
258 ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
259 ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
260 ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[COPY]](<2 x s16>), [[COPY1]](<2 x s16>)
261 ; CHECK: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s16>)
262 ; CHECK: S_ENDPGM 0, implicit [[UV]](s16), implicit [[UV1]](s16), implicit [[UV2]](s16), implicit [[UV3]](s16)
263 %0:_(<2 x s16>) = COPY $vgpr0
264 %1:_(<2 x s16>) = COPY $vgpr1
265 %2:_(<4 x s16>) = G_CONCAT_VECTORS %0, %1
266 %3:_(s16), %4:_(s16), %5:_(s16), %6:_(s16) = G_UNMERGE_VALUES %2
267 S_ENDPGM 0, implicit %3, implicit %4, implicit %5, implicit %6
268 ...
269
270 ---
271 name: test_unmerge_values_s32_of_concat_vectors_v2s32_v2s32
272 body: |
273 bb.0:
274 ; CHECK-LABEL: name: test_unmerge_values_s32_of_concat_vectors_v2s32_v2s32
275 ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
276 ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr1_vgpr2
277 ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[COPY]](<2 x s32>), [[COPY1]](<2 x s32>)
278 ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s32>)
279 ; CHECK: S_ENDPGM 0, implicit [[UV]](s32), implicit [[UV1]](s32), implicit [[UV2]](s32), implicit [[UV3]](s32)
280 %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
281 %1:_(<2 x s32>) = COPY $vgpr1_vgpr2
282 %2:_(<4 x s32>) = G_CONCAT_VECTORS %0, %1
283 %3:_(s32), %4:_(s32), %5:_(s32), %6:_(s32) = G_UNMERGE_VALUES %2
284 S_ENDPGM 0, implicit %3, implicit %4, implicit %5, implicit %6
285 ...
286
287 ---
288 name: test_unmerge_values_s32_of_concat_vectors_v2s64_v2s64
289 body: |
290 bb.0:
291 ; CHECK-LABEL: name: test_unmerge_values_s32_of_concat_vectors_v2s64_v2s64
292 ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
293 ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
294 ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[COPY]](<2 x s64>), [[COPY1]](<2 x s64>)
295 ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32), [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32), [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s64>)
296 ; CHECK: S_ENDPGM 0, implicit [[UV]](s32), implicit [[UV1]](s32), implicit [[UV2]](s32), implicit [[UV3]](s32), implicit [[UV4]](s32), implicit [[UV5]](s32), implicit [[UV6]](s32), implicit [[UV7]](s32)
297 %0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
298 %1:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
299 %2:_(<4 x s64>) = G_CONCAT_VECTORS %0, %1
300 %3:_(s32), %4:_(s32), %5:_(s32), %6:_(s32), %7:_(s32), %8:_(s32), %9:_(s32), %10:_(s32) = G_UNMERGE_VALUES %2
301 S_ENDPGM 0, implicit %3, implicit %4, implicit %5, implicit %6, implicit %7, implicit %8, implicit %9, implicit %10
302 ...
303
304 ---
305 name: test_unmerge_values_s32_of_trunc_concat_vectors_v2s64_v2s64
306 body: |
307 bb.0:
308 ; CHECK-LABEL: name: test_unmerge_values_s32_of_trunc_concat_vectors_v2s64_v2s64
309 ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
310 ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
311 ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s64>) = G_CONCAT_VECTORS [[COPY]](<2 x s64>), [[COPY1]](<2 x s64>)
312 ; CHECK: [[TRUNC:%[0-9]+]]:_(<4 x s32>) = G_TRUNC [[CONCAT_VECTORS]](<4 x s64>)
313 ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[TRUNC]](<4 x s32>)
314 ; CHECK: S_ENDPGM 0, implicit [[UV]](s32), implicit [[UV1]](s32), implicit [[UV2]](s32), implicit [[UV3]](s32)
315 %0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
316 %1:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
317 %2:_(<4 x s64>) = G_CONCAT_VECTORS %0, %1
318 %3:_(<4 x s32>) = G_TRUNC %2
319 %4:_(s32), %5:_(s32), %6:_(s32), %7:_(s32) = G_UNMERGE_VALUES %3
320 S_ENDPGM 0, implicit %4, implicit %5, implicit %6, implicit %7
321 ...
322
323 ---
324 name: test_unmerge_values_s64_of_sext_concat_vectors_v2s32_v2s32
325 body: |
326 bb.0:
327 ; CHECK-LABEL: name: test_unmerge_values_s64_of_sext_concat_vectors_v2s32_v2s32
328 ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
329 ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
330 ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[COPY]](<2 x s32>), [[COPY1]](<2 x s32>)
331 ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s32>)
332 ; CHECK: [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[UV]](s32)
333 ; CHECK: [[SEXT1:%[0-9]+]]:_(s64) = G_SEXT [[UV1]](s32)
334 ; CHECK: [[SEXT2:%[0-9]+]]:_(s64) = G_SEXT [[UV2]](s32)
335 ; CHECK: [[SEXT3:%[0-9]+]]:_(s64) = G_SEXT [[UV3]](s32)
336 ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[SEXT]](s64), [[SEXT1]](s64), [[SEXT2]](s64), [[SEXT3]](s64)
337 ; CHECK: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64), [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<4 x s64>)
338 ; CHECK: S_ENDPGM 0, implicit [[UV4]](s64), implicit [[UV5]](s64), implicit [[UV6]](s64), implicit [[UV7]](s64)
339 %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
340 %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
341 %2:_(<4 x s32>) = G_CONCAT_VECTORS %0, %1
342 %3:_(<4 x s64>) = G_SEXT %2
343 %4:_(s64), %5:_(s64), %6:_(s64), %7:_(s64) = G_UNMERGE_VALUES %3
344 S_ENDPGM 0, implicit %4, implicit %5, implicit %6, implicit %7
345 ...
346
347 ---
348 name: test_unmerge_values_s64_of_zext_concat_vectors_v2s32_v2s32
349 body: |
350 bb.0:
351 ; CHECK-LABEL: name: test_unmerge_values_s64_of_zext_concat_vectors_v2s32_v2s32
352 ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
353 ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
354 ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[COPY]](<2 x s32>), [[COPY1]](<2 x s32>)
355 ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s32>)
356 ; CHECK: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[UV]](s32)
357 ; CHECK: [[ZEXT1:%[0-9]+]]:_(s64) = G_ZEXT [[UV1]](s32)
358 ; CHECK: [[ZEXT2:%[0-9]+]]:_(s64) = G_ZEXT [[UV2]](s32)
359 ; CHECK: [[ZEXT3:%[0-9]+]]:_(s64) = G_ZEXT [[UV3]](s32)
360 ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[ZEXT]](s64), [[ZEXT1]](s64), [[ZEXT2]](s64), [[ZEXT3]](s64)
361 ; CHECK: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64), [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<4 x s64>)
362 ; CHECK: S_ENDPGM 0, implicit [[UV4]](s64), implicit [[UV5]](s64), implicit [[UV6]](s64), implicit [[UV7]](s64)
363 %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
364 %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
365 %2:_(<4 x s32>) = G_CONCAT_VECTORS %0, %1
366 %3:_(<4 x s64>) = G_ZEXT %2
367 %4:_(s64), %5:_(s64), %6:_(s64), %7:_(s64) = G_UNMERGE_VALUES %3
368 S_ENDPGM 0, implicit %4, implicit %5, implicit %6, implicit %7
369 ...
370
371 ---
372 name: test_unmerge_values_s64_of_anyext_concat_vectors_v2s32_v2s32
373 body: |
374 bb.0:
375 ; CHECK-LABEL: name: test_unmerge_values_s64_of_anyext_concat_vectors_v2s32_v2s32
376 ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
377 ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
378 ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[COPY]](<2 x s32>), [[COPY1]](<2 x s32>)
379 ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s32>)
380 ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[UV]](s32)
381 ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[UV1]](s32)
382 ; CHECK: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[UV2]](s32)
383 ; CHECK: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[UV3]](s32)
384 ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[ANYEXT]](s64), [[ANYEXT1]](s64), [[ANYEXT2]](s64), [[ANYEXT3]](s64)
385 ; CHECK: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64), [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<4 x s64>)
386 ; CHECK: S_ENDPGM 0, implicit [[UV4]](s64), implicit [[UV5]](s64), implicit [[UV6]](s64), implicit [[UV7]](s64)
387 %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
388 %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
389 %2:_(<4 x s32>) = G_CONCAT_VECTORS %0, %1
390 %3:_(<4 x s64>) = G_ANYEXT %2
391 %4:_(s64), %5:_(s64), %6:_(s64), %7:_(s64) = G_UNMERGE_VALUES %3
392 S_ENDPGM 0, implicit %4, implicit %5, implicit %6, implicit %7
393 ...
394
395 ---
396 name: test_unmerge_values_s8_of_trunc_v4s16_concat_vectors_v2s32_v2s32
397 body: |
398 bb.0:
399 ; CHECK-LABEL: name: test_unmerge_values_s8_of_trunc_v4s16_concat_vectors_v2s32_v2s32
400 ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
401 ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
402 ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[COPY]](<2 x s32>), [[COPY1]](<2 x s32>)
403 ; CHECK: [[TRUNC:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[CONCAT_VECTORS]](<4 x s32>)
404 ; CHECK: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8), [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8), [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8), [[UV6:%[0-9]+]]:_(s8), [[UV7:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[TRUNC]](<4 x s16>)
405 ; CHECK: S_ENDPGM 0, implicit [[UV]](s8), implicit [[UV1]](s8), implicit [[UV2]](s8), implicit [[UV3]](s8), implicit [[UV4]](s8), implicit [[UV5]](s8), implicit [[UV6]](s8), implicit [[UV7]](s8)
406 %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
407 %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
408 %2:_(<4 x s32>) = G_CONCAT_VECTORS %0, %1
409 %3:_(<4 x s16>) = G_TRUNC %2
410 %4:_(s8), %5:_(s8), %6:_(s8), %7:_(s8), %8:_(s8), %9:_(s8), %10:_(s8), %11:_(s8) = G_UNMERGE_VALUES %3
411 S_ENDPGM 0, implicit %4, implicit %5, implicit %6, implicit %7, implicit %8, implicit %9, implicit %10, implicit %11
412 ...
413
414 ---
415 name: test_unmerge_values_s16_of_anyext_v4s64_concat_vectors_v2s32_v2s32
416 body: |
417 bb.0:
418 ; CHECK-LABEL: name: test_unmerge_values_s16_of_anyext_v4s64_concat_vectors_v2s32_v2s32
419 ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
420 ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
421 ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s32>) = G_CONCAT_VECTORS [[COPY]](<2 x s32>), [[COPY1]](<2 x s32>)
422 ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[CONCAT_VECTORS]](<4 x s32>)
423 ; CHECK: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[UV]](s32)
424 ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s64) = G_ANYEXT [[UV1]](s32)
425 ; CHECK: [[ANYEXT2:%[0-9]+]]:_(s64) = G_ANYEXT [[UV2]](s32)
426 ; CHECK: [[ANYEXT3:%[0-9]+]]:_(s64) = G_ANYEXT [[UV3]](s32)
427 ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s64>) = G_BUILD_VECTOR [[ANYEXT]](s64), [[ANYEXT1]](s64), [[ANYEXT2]](s64), [[ANYEXT3]](s64)
428 ; CHECK: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16), [[UV8:%[0-9]+]]:_(s16), [[UV9:%[0-9]+]]:_(s16), [[UV10:%[0-9]+]]:_(s16), [[UV11:%[0-9]+]]:_(s16), [[UV12:%[0-9]+]]:_(s16), [[UV13:%[0-9]+]]:_(s16), [[UV14:%[0-9]+]]:_(s16), [[UV15:%[0-9]+]]:_(s16), [[UV16:%[0-9]+]]:_(s16), [[UV17:%[0-9]+]]:_(s16), [[UV18:%[0-9]+]]:_(s16), [[UV19:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<4 x s64>)
429 ; CHECK: S_ENDPGM 0, implicit [[UV4]](s16), implicit [[UV5]](s16), implicit [[UV6]](s16), implicit [[UV7]](s16)
430 %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
431 %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
432 %2:_(<4 x s32>) = G_CONCAT_VECTORS %0, %1
433 %3:_(<4 x s64>) = G_ANYEXT %2
434 %4:_(s16), %5:_(s16), %6:_(s16), %7:_(s16), %8:_(s16), %9:_(s16), %10:_(s16), %11:_(s16), %12:_(s16), %13:_(s16), %14:_(s16), %15:_(s16), %16:_(s16), %17:_(s16), %18:_(s16), %19:_(s16) = G_UNMERGE_VALUES %3
435 S_ENDPGM 0, implicit %4, implicit %5, implicit %6, implicit %7
436 ...
437
438 # FIXME: Handle this
439 ---
440 name: test_unmerge_values_s32_of_concat_vectors_v4s32_v4s32
441 body: |
442 bb.0:
443 ; CHECK-LABEL: name: test_unmerge_values_s32_of_concat_vectors_v4s32_v4s32
444 ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
445 ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
446 ; CHECK: [[COPY2:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr4_vgpr5
447 ; CHECK: [[COPY3:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr6_vgpr7
448 ; CHECK: [[CONCAT_VECTORS:%[0-9]+]]:_(<8 x s32>) = G_CONCAT_VECTORS [[COPY]](<2 x s32>), [[COPY1]](<2 x s32>), [[COPY2]](<2 x s32>), [[COPY3]](<2 x s32>)
449 ; CHECK: [[TRUNC:%[0-9]+]]:_(<8 x s16>) = G_TRUNC [[CONCAT_VECTORS]](<8 x s32>)
450 ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32), [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[TRUNC]](<8 x s16>)
451 ; CHECK: S_ENDPGM 0, implicit [[UV]](s32), implicit [[UV1]](s32), implicit [[UV2]](s32), implicit [[UV3]](s32)
452 %0:_(<2 x s32>) = COPY $vgpr0_vgpr1
453 %1:_(<2 x s32>) = COPY $vgpr2_vgpr3
454 %2:_(<2 x s32>) = COPY $vgpr4_vgpr5
455 %3:_(<2 x s32>) = COPY $vgpr6_vgpr7
456 %4:_(<8 x s32>) = G_CONCAT_VECTORS %0, %1, %2, %3
457 %5:_(<8 x s16>) = G_TRUNC %4
458 %6:_(s32), %7:_(s32), %8:_(s32), %9:_(s32) = G_UNMERGE_VALUES %5
459 S_ENDPGM 0, implicit %6, implicit %7, implicit %8, implicit %9
460 ...
461
462 ---
463 name: test_unmerge_values_s64_of_build_vector_v4s32
464 body: |
465 bb.0:
466 ; CHECK-LABEL: name: test_unmerge_values_s64_of_build_vector_v4s32
467 ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
468 ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
469 ; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
470 ; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
471 ; CHECK: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s32>) = G_BUILD_VECTOR [[COPY]](s32), [[COPY1]](s32), [[COPY2]](s32), [[COPY3]](s32)
472 ; CHECK: [[TRUNC:%[0-9]+]]:_(<4 x s16>) = G_TRUNC [[BUILD_VECTOR]](<4 x s32>)
473 ; CHECK: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[TRUNC]](<4 x s16>)
474 ; CHECK: S_ENDPGM 0, implicit [[UV]](s32), implicit [[UV1]](s32)
475 %0:_(s32) = COPY $vgpr0
476 %1:_(s32) = COPY $vgpr1
477 %2:_(s32) = COPY $vgpr2
478 %3:_(s32) = COPY $vgpr3
479 %4:_(<4 x s32>) = G_BUILD_VECTOR %0, %1, %2, %3
480 %5:_(<4 x s16>) = G_TRUNC %4
481 %6:_(s32), %7:_(s32) = G_UNMERGE_VALUES %5
482 S_ENDPGM 0, implicit %6, implicit %7
483 ...