Revert "[DAGCombiner] Extending pattern detection for vector shuffle (REAPPLIED)" This reverts commit r310782. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@310822 91177308-0d34-0410-b5e6-96231b3b80d8 Elad Cohen 2 years ago
5 changed files with 118 additions and 109 deletions.
@@ -14185,17 +14185,9 @@
   EVT InVT1 = VecIn1.getValueType();
   EVT InVT2 = VecIn2.getNode() ? VecIn2.getValueType() : InVT1;

-  unsigned Vec2Offset = 0;
+  unsigned Vec2Offset = InVT1.getVectorNumElements();
   unsigned NumElems = VT.getVectorNumElements();
   unsigned ShuffleNumElems = NumElems;
-
-  // In case both the input vectors are extracted from same
-  // vector we do not need extra addend (Vec2Offset) while
-  // computing shuffle mask.
-  if (!VecIn2 || !(VecIn1.getOpcode() == ISD::EXTRACT_SUBVECTOR) ||
-      !(VecIn2.getOpcode() == ISD::EXTRACT_SUBVECTOR) ||
-      !(VecIn1.getOperand(0) == VecIn2.getOperand(0)))
-    Vec2Offset = InVT1.getVectorNumElements();

   // We can't generate a shuffle node with mismatched input and output types.
   // Try to make the types match the type of the output.
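For context: this combine builds a single shuffle over the concatenation (VecIn1, VecIn2), so mask entries drawn from the second input must be biased by Vec2Offset; the reverted patch set that bias to 0 whenever both inputs were EXTRACT_SUBVECTORs of the same base vector. Below is a minimal standalone C++ sketch of the bias arithmetic, a model rather than the LLVM API (buildMask and its parameters are illustrative):

#include <cstdio>
#include <vector>

// Model of shuffle-mask construction over the pair (VecIn1 ++ VecIn2):
// an element taken from VecIn2 gets its in-source index biased by
// Vec2Offset = number of elements in VecIn1.
std::vector<int> buildMask(const std::vector<int> &srcVec,  // 1 or 2 per element
                           const std::vector<int> &srcIdx,  // index within source
                           int vec1NumElems) {
  std::vector<int> mask;
  for (size_t i = 0; i < srcVec.size(); ++i) {
    int bias = (srcVec[i] == 2) ? vec1NumElems : 0;  // Vec2Offset
    mask.push_back(srcIdx[i] + bias);
  }
  return mask;
}

int main() {
  // Four output elements, two taken from each 4-wide input.
  for (int m : buildMask({1, 1, 2, 2}, {0, 2, 1, 3}, 4))
    std::printf("%d ", m);  // prints: 0 2 5 7
}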
@@ -14343,6 +14335,7 @@
   if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
       !isa<ConstantSDNode>(Op.getOperand(1)))
     return SDValue();
+
   SDValue ExtractedFromVec = Op.getOperand(0);

   // All inputs must have the same element type as the output.
@@ -14364,44 +14357,6 @@
   // If we didn't find at least one input vector, bail out.
   if (VecIn.size() < 2)
     return SDValue();
-
-  // If all the Operands of BUILD_VECTOR extract from same
-  // vector, then split the vector efficiently based on the maximum
-  // vector access index and adjust the VectorMask and
-  // VecIn accordingly.
-  if (VecIn.size() == 2) {
-    unsigned MaxIndex = 0;
-    unsigned NearestPow2 = 0;
-    SDValue Vec = VecIn.back();
-    EVT InVT = Vec.getValueType();
-    MVT IdxTy = TLI.getVectorIdxTy(DAG.getDataLayout());
-    SmallVector<unsigned, 8> IndexVec(NumElems, 0);
-
-    for (unsigned i = 0; i < NumElems; i++) {
-      if (VectorMask[i] <= 0)
-        continue;
-      unsigned Index = N->getOperand(i).getConstantOperandVal(1);
-      IndexVec[i] = Index;
-      MaxIndex = std::max(MaxIndex, Index);
-    }
-
-    NearestPow2 = PowerOf2Ceil(MaxIndex);
-    if (InVT.isSimple() && (NearestPow2 > 2) && ((NumElems * 2) < NearestPow2)) {
-      unsigned SplitSize = NearestPow2 / 2;
-      EVT SplitVT = EVT::getVectorVT(*DAG.getContext(),
-                                     InVT.getVectorElementType(), SplitSize);
-      SDValue VecIn2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, Vec,
-                                   DAG.getConstant(SplitSize, DL, IdxTy));
-      SDValue VecIn1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, Vec,
-                                   DAG.getConstant(0, DL, IdxTy));
-      VecIn.pop_back();
-      VecIn.push_back(VecIn1);
-      VecIn.push_back(VecIn2);
-
-      for (unsigned i = 0; i < NumElems; i++)
-        VectorMask[i] = (IndexVec[i] < SplitSize) ? 1 : 2;
-    }
-  }

   // TODO: We want to sort the vectors by descending length, so that adjacent
   // pairs have similar length, and the longer vector is always first in the
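The block removed above is the heart of the reverted patch: when every BUILD_VECTOR operand extracts from one wide source, it rounds the maximum extract index up to a power of two (PowerOf2Ceil) and, if the result vector is less than half that wide, splits the source into two half-size subvectors and retargets VectorMask at them. A self-contained sketch of just that decision, fed with the numbers from the v64i8 test below (powerOf2Ceil stands in for llvm::PowerOf2Ceil):

#include <cstdio>

// Smallest power of two >= v, for v >= 1 (stand-in for llvm::PowerOf2Ceil).
unsigned powerOf2Ceil(unsigned v) {
  unsigned p = 1;
  while (p < v) p <<= 1;
  return p;
}

int main() {
  unsigned NumElems = 16;  // elements produced by the BUILD_VECTOR
  unsigned MaxIndex = 62;  // highest extract index among its operands
  unsigned NearestPow2 = powerOf2Ceil(MaxIndex);        // 64
  if (NearestPow2 > 2 && NumElems * 2 < NearestPow2) {  // 32 < 64: split
    unsigned SplitSize = NearestPow2 / 2;
    std::printf("split source into 2 x %u-element subvectors\n", SplitSize);
    // Each element with extract index < SplitSize is retargeted at the
    // first half (VectorMask[i] = 1), the rest at the second half (= 2).
  }
}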
@@ -310,33 +310,81 @@
 ;
 ; AVX512BW-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_62:
 ; AVX512BW: # BB#0:
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BW-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX512BW-NEXT: vpshufb %xmm2, %xmm0, %xmm2
-; AVX512BW-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,1,5,9,14,u,u,u,u,u,u,u,u]
-; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u]
-; AVX512BW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; AVX512BW-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX512BW-NEXT: vpextrb $5, %xmm0, %eax
+; AVX512BW-NEXT: vpextrb $1, %xmm0, %ecx
+; AVX512BW-NEXT: vmovd %ecx, %xmm1
+; AVX512BW-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT: vpextrb $9, %xmm0, %eax
+; AVX512BW-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT: vpextrb $13, %xmm0, %eax
+; AVX512BW-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT: vextracti32x4 $1, %zmm0, %xmm2
+; AVX512BW-NEXT: vpextrb $1, %xmm2, %eax
+; AVX512BW-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT: vpextrb $5, %xmm2, %eax
+; AVX512BW-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT: vpextrb $9, %xmm2, %eax
+; AVX512BW-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT: vpextrb $13, %xmm2, %eax
+; AVX512BW-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT: vextracti32x4 $2, %zmm0, %xmm2
+; AVX512BW-NEXT: vpextrb $1, %xmm2, %eax
+; AVX512BW-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT: vpextrb $5, %xmm2, %eax
+; AVX512BW-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT: vpextrb $9, %xmm2, %eax
+; AVX512BW-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT: vpextrb $13, %xmm2, %eax
+; AVX512BW-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT: vextracti32x4 $3, %zmm0, %xmm0
+; AVX512BW-NEXT: vpextrb $1, %xmm0, %eax
+; AVX512BW-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT: vpextrb $5, %xmm0, %eax
+; AVX512BW-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT: vpextrb $9, %xmm0, %eax
+; AVX512BW-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX512BW-NEXT: vpextrb $14, %xmm0, %eax
+; AVX512BW-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
 ; AVX512BW-NEXT: vzeroupper
 ; AVX512BW-NEXT: retq
 ;
 ; AVX512BWVL-LABEL: trunc_shuffle_v64i8_01_05_09_13_17_21_25_29_33_37_41_45_49_53_57_62:
 ; AVX512BWVL: # BB#0:
-; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm1
-; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm2 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX512BWVL-NEXT: vpshufb %xmm2, %xmm1, %xmm1
-; AVX512BWVL-NEXT: vpshufb %xmm2, %xmm0, %xmm2
-; AVX512BWVL-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; AVX512BWVL-NEXT: vextracti64x4 $1, %zmm0, %ymm0
-; AVX512BWVL-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX512BWVL-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u,u,1,5,9,14,u,u,u,u,u,u,u,u]
-; AVX512BWVL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u]
-; AVX512BWVL-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; AVX512BWVL-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX512BWVL-NEXT: vpextrb $5, %xmm0, %eax
+; AVX512BWVL-NEXT: vpextrb $1, %xmm0, %ecx
+; AVX512BWVL-NEXT: vmovd %ecx, %xmm1
+; AVX512BWVL-NEXT: vpinsrb $1, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpextrb $9, %xmm0, %eax
+; AVX512BWVL-NEXT: vpinsrb $2, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpextrb $13, %xmm0, %eax
+; AVX512BWVL-NEXT: vpinsrb $3, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vextracti32x4 $1, %zmm0, %xmm2
+; AVX512BWVL-NEXT: vpextrb $1, %xmm2, %eax
+; AVX512BWVL-NEXT: vpinsrb $4, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpextrb $5, %xmm2, %eax
+; AVX512BWVL-NEXT: vpinsrb $5, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpextrb $9, %xmm2, %eax
+; AVX512BWVL-NEXT: vpinsrb $6, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpextrb $13, %xmm2, %eax
+; AVX512BWVL-NEXT: vpinsrb $7, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vextracti32x4 $2, %zmm0, %xmm2
+; AVX512BWVL-NEXT: vpextrb $1, %xmm2, %eax
+; AVX512BWVL-NEXT: vpinsrb $8, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpextrb $5, %xmm2, %eax
+; AVX512BWVL-NEXT: vpinsrb $9, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpextrb $9, %xmm2, %eax
+; AVX512BWVL-NEXT: vpinsrb $10, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpextrb $13, %xmm2, %eax
+; AVX512BWVL-NEXT: vpinsrb $11, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vextracti32x4 $3, %zmm0, %xmm0
+; AVX512BWVL-NEXT: vpextrb $1, %xmm0, %eax
+; AVX512BWVL-NEXT: vpinsrb $12, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpextrb $5, %xmm0, %eax
+; AVX512BWVL-NEXT: vpinsrb $13, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpextrb $9, %xmm0, %eax
+; AVX512BWVL-NEXT: vpinsrb $14, %eax, %xmm1, %xmm1
+; AVX512BWVL-NEXT: vpextrb $14, %xmm0, %eax
+; AVX512BWVL-NEXT: vpinsrb $15, %eax, %xmm1, %xmm0
 ; AVX512BWVL-NEXT: vzeroupper
 ; AVX512BWVL-NEXT: retq
   %res = shufflevector <64 x i8> %x, <64 x i8> %x, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 62>
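The restored AVX512BW/AVX512BWVL sequences above fully scalarize the shuffle. The index arithmetic behind each instruction pair: byte i of a 512-bit register sits in 128-bit lane i/16 at offset i%16, so mask element 62 becomes vextracti32x4 $3 plus vpextrb $14, exactly the last pair emitted (lane 0 is already in xmm0, so the first four elements need no extract). A quick sketch:

#include <cstdio>

// Byte i of a 512-bit vector: 128-bit lane i/16, byte offset i%16.
int main() {
  const int mask[16] = {1,  5,  9,  13, 17, 21, 25, 29,
                        33, 37, 41, 45, 49, 53, 57, 62};
  for (int m : mask)
    std::printf("elt %2d -> vextracti32x4 $%d, then vpextrb $%d\n",
                m, m / 16, m % 16);
}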
@@ -285,10 +285,13 @@
 define <4 x i32> @test_v16i32_0_1_2_12 (<16 x i32> %v) {
 ; ALL-LABEL: test_v16i32_0_1_2_12:
 ; ALL: # BB#0:
-; ALL-NEXT: vextracti32x8 $1, %zmm0, %ymm1
-; ALL-NEXT: vextracti128 $1, %ymm1, %xmm1
-; ALL-NEXT: vpbroadcastd %xmm1, %xmm1
-; ALL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[3]
+; ALL-NEXT: vpextrd $1, %xmm0, %eax
+; ALL-NEXT: vpinsrd $1, %eax, %xmm0, %xmm1
+; ALL-NEXT: vpextrd $2, %xmm0, %eax
+; ALL-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
+; ALL-NEXT: vextracti32x4 $3, %zmm0, %xmm0
+; ALL-NEXT: vmovd %xmm0, %eax
+; ALL-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
 ; ALL-NEXT: vzeroupper
 ; ALL-NEXT: retq
   %res = shufflevector <16 x i32> %v, <16 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 12>
@@ -2725,17 +2725,20 @@
 define <2 x i64> @test_v8i64_2_5 (<8 x i64> %v) {
 ; AVX512F-LABEL: test_v8i64_2_5:
 ; AVX512F: # BB#0:
-; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0
+; AVX512F-NEXT: vextracti32x4 $2, %zmm0, %xmm1
+; AVX512F-NEXT: vextracti32x4 $1, %zmm0, %xmm0
 ; AVX512F-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
 ; AVX512F-NEXT: vzeroupper
 ; AVX512F-NEXT: retq
 ;
 ; AVX512F-32-LABEL: test_v8i64_2_5:
 ; AVX512F-32: # BB#0:
-; AVX512F-32-NEXT: vextracti64x4 $1, %zmm0, %ymm1
-; AVX512F-32-NEXT: vextracti128 $1, %ymm0, %xmm0
-; AVX512F-32-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512F-32-NEXT: vextracti32x4 $1, %zmm0, %xmm1
+; AVX512F-32-NEXT: vextracti32x4 $2, %zmm0, %xmm0
+; AVX512F-32-NEXT: vpextrd $2, %xmm0, %eax
+; AVX512F-32-NEXT: vpinsrd $2, %eax, %xmm1, %xmm1
+; AVX512F-32-NEXT: vpextrd $3, %xmm0, %eax
+; AVX512F-32-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0
 ; AVX512F-32-NEXT: vzeroupper
 ; AVX512F-32-NEXT: retl
   %res = shufflevector <8 x i64> %v, <8 x i64> undef, <2 x i32> <i32 2, i32 5>
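On the 32-bit target the same shuffle gets an extra twist: there is no 64-bit element extract (vpextrq requires 64-bit mode), so the restored code moves the second i64 element as two i32 halves, the vpextrd/vpinsrd $2 and $3 pair above. A sketch of that half-by-half move, assuming nothing beyond standard C++:

#include <cstdint>
#include <cstdio>

int main() {
  // Moving one i64 element as two i32 halves, mirroring the
  // vpextrd $2 / vpinsrd $2 and vpextrd $3 / vpinsrd $3 pair above.
  uint64_t elem = 0x1122334455667788ULL;            // the i64 being moved
  uint32_t lo = static_cast<uint32_t>(elem);        // dword 2 of the xmm
  uint32_t hi = static_cast<uint32_t>(elem >> 32);  // dword 3 of the xmm
  uint64_t rebuilt = (static_cast<uint64_t>(hi) << 32) | lo;
  std::printf("%d\n", elem == rebuilt);             // prints 1
}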
@@ -566,37 +566,37 @@
 ; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
 ; AVX2-NEXT: vpshufb %xmm4, %xmm2, %xmm2
 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
-; AVX2-NEXT: vpshufb %xmm3, %xmm4, %xmm5
-; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm3
-; AVX2-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm6
-; AVX2-NEXT: vpshufb %xmm5, %xmm6, %xmm7
-; AVX2-NEXT: vpshufb %xmm5, %xmm0, %xmm5
-; AVX2-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1]
-; AVX2-NEXT: vpblendd {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3]
-; AVX2-NEXT: vpcmpeqb %xmm3, %xmm2, %xmm2
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <u,u,u,u,2,6,10,14,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm3, %xmm4, %xmm5
-; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm3
-; AVX2-NEXT: vpunpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm5, %xmm6, %xmm7
-; AVX2-NEXT: vpshufb %xmm5, %xmm0, %xmm5
-; AVX2-NEXT: vpunpckldq {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1]
-; AVX2-NEXT: vpblendd {{.*#+}} xmm3 = xmm5[0,1],xmm3[2,3]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm5 = <u,u,u,u,3,7,11,15,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm5, %xmm4, %xmm4
-; AVX2-NEXT: vpshufb %xmm5, %xmm1, %xmm1
-; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
-; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
-; AVX2-NEXT: vpshufb %xmm4, %xmm6, %xmm5
-; AVX2-NEXT: vpshufb %xmm4, %xmm0, %xmm0
+; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm4, %xmm3, %xmm5
+; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm4
+; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm5
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm6, %xmm5, %xmm7
+; AVX2-NEXT: vpshufb %xmm6, %xmm0, %xmm6
+; AVX2-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3]
+; AVX2-NEXT: vpcmpeqb %xmm4, %xmm2, %xmm2
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <u,u,u,u,2,6,10,14,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm4, %xmm3, %xmm6
+; AVX2-NEXT: vpshufb %xmm4, %xmm1, %xmm4
+; AVX2-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = <2,6,10,14,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm6, %xmm5, %xmm7
+; AVX2-NEXT: vpshufb %xmm6, %xmm0, %xmm6
+; AVX2-NEXT: vpunpckldq {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1]
+; AVX2-NEXT: vpblendd {{.*#+}} xmm4 = xmm6[0,1],xmm4[2,3]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm6 = <u,u,u,u,3,7,11,15,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm6, %xmm3, %xmm3
+; AVX2-NEXT: vpshufb %xmm6, %xmm1, %xmm1
+; AVX2-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = <3,7,11,15,u,u,u,u,u,u,u,u,u,u,u,u>
+; AVX2-NEXT: vpshufb %xmm3, %xmm5, %xmm5
+; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
 ; AVX2-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
 ; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
-; AVX2-NEXT: vpcmpeqb %xmm0, %xmm3, %xmm0
+; AVX2-NEXT: vpcmpeqb %xmm0, %xmm4, %xmm0
 ; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
 ; AVX2-NEXT: vpand %xmm1, %xmm2, %xmm2
 ; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -835,15 +835,15 @@
 ; AVX512-NEXT: vpmovdw %zmm1, %ymm3
 ; AVX512-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
 ; AVX512-NEXT: vpmovwb %zmm2, %ymm8
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm7 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
 ; AVX512-NEXT: vextracti64x4 $1, %zmm1, %ymm14
 ; AVX512-NEXT: vextracti128 $1, %ymm14, %xmm9
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm7 = <u,u,u,u,1,5,9,13,u,u,u,u,u,u,u,u>
 ; AVX512-NEXT: vpshufb %xmm7, %xmm9, %xmm4
 ; AVX512-NEXT: vpshufb %xmm7, %xmm14, %xmm5
 ; AVX512-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
 ; AVX512-NEXT: vinserti128 $1, %xmm4, %ymm0, %ymm5
+; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm10
-; AVX512-NEXT: vmovdqa {{.*#+}} xmm3 = <1,5,9,13,u,u,u,u,u,u,u,u,u,u,u,u>
 ; AVX512-NEXT: vpshufb %xmm3, %xmm10, %xmm6
 ; AVX512-NEXT: vpshufb %xmm3, %xmm1, %xmm4
 ; AVX512-NEXT: vpunpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]