llvm.org GIT mirror: llvm / ba2e8a1

Merging r370404:
------------------------------------------------------------------------
r370404 | rksimon | 2019-08-29 22:22:08 +0200 (Thu, 29 Aug 2019) | 3 lines

[X86][SSE] combinePMULDQ - pmuldq(x, 0) -> zero vector (PR43159)

ISD::isBuildVectorAllZeros permits undef elements to be present, which means
we can't return it as a zero vector. PMULDQ/PMULUDQ is an extending multiply
so a multiply by zero of the lower 32-bits should result in a zero 64-bit
element.
------------------------------------------------------------------------

git-svn-id: https://llvm.org/svn/llvm-project/llvm/branches/release_90@370445 91177308-0d34-0410-b5e6-96231b3b80d8

Committed by Hans Wennborg, 1 year, 3 months ago.
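The reasoning above can be illustrated with a small scalar model of a single PMULUDQ lane. The sketch below is not part of the patch; pmuludq_lane is a made-up helper that mimics how the instruction zero-extends the low 32 bits of each 64-bit element before multiplying, so a lane whose multiplier has zero low bits produces a zero 64-bit result no matter what the (possibly undef) upper bits hold.

#include <cstdint>
#include <cstdio>

// Scalar model of one 64-bit lane of PMULUDQ: only the low 32 bits of each
// element participate; they are zero-extended and multiplied into a full
// 64-bit result.
static uint64_t pmuludq_lane(uint64_t A, uint64_t B) {
  return uint64_t(uint32_t(A)) * uint64_t(uint32_t(B));
}

int main() {
  uint64_t X = 0xDEADBEEFCAFEF00DULL;                // arbitrary lane contents
  uint64_t ZeroLowUndefHigh = 0xFFFFFFFF00000000ULL; // low 32 bits are zero,
                                                     // high bits stand in for undef
  // The product is 0 regardless of the upper bits, so folding
  // pmuldq/pmuludq(x, all-zeros-or-undef) to an explicit zero vector is sound,
  // while returning the RHS operand itself could propagate undef.
  std::printf("%llu\n",
              (unsigned long long)pmuludq_lane(X, ZeroLowUndefHigh)); // prints 0
  return 0;
}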
2 changed files with 120 additions and 3 deletions.
@@ -44103,7 +44103,8 @@
 
 // Simplify PMULDQ and PMULUDQ operations.
 static SDValue combinePMULDQ(SDNode *N, SelectionDAG &DAG,
-                             TargetLowering::DAGCombinerInfo &DCI) {
+                             TargetLowering::DAGCombinerInfo &DCI,
+                             const X86Subtarget &Subtarget) {
   SDValue LHS = N->getOperand(0);
   SDValue RHS = N->getOperand(1);
 
@@ -44113,8 +44114,9 @@
     return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0), RHS, LHS);
 
   // Multiply by zero.
+  // Don't return RHS as it may contain UNDEFs.
   if (ISD::isBuildVectorAllZeros(RHS.getNode()))
-    return RHS;
+    return getZeroVector(N->getSimpleValueType(0), Subtarget, DAG, SDLoc(N));
 
   // Aggressively peek through ops to get at the demanded low bits.
   APInt DemandedMask = APInt::getLowBitsSet(64, 32);
@@ -44322,7 +44324,7 @@
   case X86ISD::PCMPEQ:
   case X86ISD::PCMPGT:      return combineVectorCompare(N, DAG, Subtarget);
   case X86ISD::PMULDQ:
-  case X86ISD::PMULUDQ:     return combinePMULDQ(N, DAG, DCI);
+  case X86ISD::PMULUDQ:     return combinePMULDQ(N, DAG, DCI, Subtarget);
   }
 
   return SDValue();
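As a sanity check on the fold above (illustrative only, not part of the patch, and assuming an x86-64 host with SSE2), the intrinsic _mm_mul_epu32, which compiles to PMULUDQ, produces the all-zero vector whenever one operand is zero, which is exactly the value getZeroVector now materializes:

#include <immintrin.h>
#include <cstdint>
#include <cstdio>

int main() {
  // Arbitrary contents standing in for the non-constant multiplicand.
  __m128i X    = _mm_set_epi32(0x12345678, 0x7ABCDEF0, 0x0BADF00D, 0x1337C0DE);
  __m128i Zero = _mm_setzero_si128();

  // PMULUDQ multiplies the low 32 bits of each 64-bit element of X and Zero
  // into two 64-bit products; with a zero operand both products are 0.
  __m128i Prod = _mm_mul_epu32(X, Zero);

  alignas(16) uint64_t Out[2];
  _mm_store_si128(reinterpret_cast<__m128i *>(Out), Prod);
  std::printf("%llu %llu\n", (unsigned long long)Out[0],
              (unsigned long long)Out[1]); // prints: 0 0
  return 0;
}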
@@ -171,3 +171,118 @@
   %tmp35 = add <4 x i64> %tmp29, %tmp28
   ret void
 }
+
+define i32 @PR43159(<4 x i32>* %a0) {
+; SSE-LABEL: PR43159:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: movdqa (%rdi), %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm1 = [1645975491,344322273,2164392969,1916962805]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
+; SSE-NEXT: pmuludq %xmm2, %xmm3
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: psrld $1, %xmm2
+; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5],xmm0[6,7]
+; SSE-NEXT: pmuludq %xmm1, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,3,3]
+; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,3],xmm1[4,5],xmm3[6,7]
+; SSE-NEXT: psubd %xmm1, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; SSE-NEXT: pmuludq {{.*}}(%rip), %xmm0
+; SSE-NEXT: pxor %xmm2, %xmm2
+; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,3],xmm2[4,5],xmm0[6,7]
+; SSE-NEXT: paddd %xmm1, %xmm2
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: psrld $7, %xmm0
+; SSE-NEXT: psrld $6, %xmm2
+; SSE-NEXT: movd %xmm2, %edi
+; SSE-NEXT: pextrd $1, %xmm0, %esi
+; SSE-NEXT: pextrd $2, %xmm2, %edx
+; SSE-NEXT: pextrd $3, %xmm0, %ecx
+; SSE-NEXT: jmp foo # TAILCALL
+;
+; AVX2-LABEL: PR43159:
+; AVX2: # %bb.0: # %entry
+; AVX2-NEXT: vmovdqa (%rdi), %xmm0
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1645975491,344322273,2164392969,1916962805]
+; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm3
+; AVX2-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; AVX2-NEXT: vpmuludq %xmm2, %xmm4, %xmm2
+; AVX2-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
+; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; AVX2-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; AVX2-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
+; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX2-NEXT: vmovd %xmm0, %edi
+; AVX2-NEXT: vpextrd $1, %xmm0, %esi
+; AVX2-NEXT: vpextrd $2, %xmm0, %edx
+; AVX2-NEXT: vpextrd $3, %xmm0, %ecx
+; AVX2-NEXT: jmp foo # TAILCALL
+;
+; AVX512VL-LABEL: PR43159:
+; AVX512VL: # %bb.0: # %entry
+; AVX512VL-NEXT: vmovdqa (%rdi), %xmm0
+; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm1 = [1645975491,344322273,2164392969,1916962805]
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm3
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; AVX512VL-NEXT: vpmuludq %xmm2, %xmm4, %xmm2
+; AVX512VL-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; AVX512VL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX512VL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; AVX512VL-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
+; AVX512VL-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512VL-NEXT: vmovd %xmm0, %edi
+; AVX512VL-NEXT: vpextrd $1, %xmm0, %esi
+; AVX512VL-NEXT: vpextrd $2, %xmm0, %edx
+; AVX512VL-NEXT: vpextrd $3, %xmm0, %ecx
+; AVX512VL-NEXT: jmp foo # TAILCALL
+;
+; AVX512DQVL-LABEL: PR43159:
+; AVX512DQVL: # %bb.0: # %entry
+; AVX512DQVL-NEXT: vmovdqa (%rdi), %xmm0
+; AVX512DQVL-NEXT: vmovdqa {{.*#+}} xmm1 = [1645975491,344322273,2164392969,1916962805]
+; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; AVX512DQVL-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm3
+; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[1,1,3,3]
+; AVX512DQVL-NEXT: vpmuludq %xmm2, %xmm4, %xmm2
+; AVX512DQVL-NEXT: vpmuludq %xmm1, %xmm3, %xmm1
+; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
+; AVX512DQVL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
+; AVX512DQVL-NEXT: vpsubd %xmm1, %xmm0, %xmm0
+; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
+; AVX512DQVL-NEXT: vpbroadcastd {{.*#+}} xmm2 = [2147483648,2147483648,2147483648,2147483648]
+; AVX512DQVL-NEXT: vpmuludq %xmm2, %xmm0, %xmm0
+; AVX512DQVL-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; AVX512DQVL-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3]
+; AVX512DQVL-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX512DQVL-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
+; AVX512DQVL-NEXT: vmovd %xmm0, %edi
+; AVX512DQVL-NEXT: vpextrd $1, %xmm0, %esi
+; AVX512DQVL-NEXT: vpextrd $2, %xmm0, %edx
+; AVX512DQVL-NEXT: vpextrd $3, %xmm0, %ecx
+; AVX512DQVL-NEXT: jmp foo # TAILCALL
+entry:
+  %0 = load <4 x i32>, <4 x i32>* %a0, align 16
+  %div = udiv <4 x i32> %0,
+  %ext0 = extractelement <4 x i32> %div, i32 0
+  %ext1 = extractelement <4 x i32> %div, i32 1
+  %ext2 = extractelement <4 x i32> %div, i32 2
+  %ext3 = extractelement <4 x i32> %div, i32 3
+  %call = tail call i32 @foo(i32 %ext0, i32 %ext1, i32 %ext2, i32 %ext3)
+  ret i32 %call
+}
+declare dso_local i32 @foo(i32, i32, i32, i32)
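The new test exercises PMULUDQ indirectly: the <4 x i32> udiv by constants (the divisor vector is truncated in this rendering of the diff) is lowered by the X86 backend into multiply-by-magic-reciprocal sequences, which is where the pmuludq/vpmuludq instructions in the CHECK lines above come from. The sketch below shows that lowering in scalar form for one example divisor (3, chosen purely for illustration and unrelated to the test's actual divisors):

#include <cassert>
#include <cstdint>

// Unsigned division by a constant rewritten as a widening multiply by a
// precomputed "magic" reciprocal followed by a shift -- the same idea the
// backend applies per lane (via PMULUDQ) for the vector udiv in the test.
static uint32_t UDiv3(uint32_t X) {
  const uint64_t Magic = 0xAAAAAAABULL; // ceil(2^33 / 3)
  return uint32_t((uint64_t(X) * Magic) >> 33);
}

int main() {
  const uint32_t Tests[] = {0u, 1u, 2u, 3u, 7u, 1000000007u, 0xFFFFFFFFu};
  for (uint32_t X : Tests)
    assert(UDiv3(X) == X / 3);
  return 0;
}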