llvm.org GIT mirror: llvm / 061426e

[X86] Add support for printing shuffle comments for VALIGN instructions.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@284915 91177308-0d34-0410-b5e6-96231b3b80d8

Craig Topper, 3 years ago
5 changed files with 68 additions and 3 deletions.
--- a/lib/Target/X86/InstPrinter/X86InstComments.cpp
+++ b/lib/Target/X86/InstPrinter/X86InstComments.cpp
@@ -254,6 +254,10 @@
   CASE_MASKZ_UNPCK(UNPCKLPS, r)
   CASE_MASKZ_SHUF(PALIGNR, r)
   CASE_MASKZ_SHUF(PALIGNR, m)
+  CASE_MASKZ_SHUF(ALIGNQ, r)
+  CASE_MASKZ_SHUF(ALIGNQ, m)
+  CASE_MASKZ_SHUF(ALIGND, r)
+  CASE_MASKZ_SHUF(ALIGND, m)
   CASE_MASKZ_SHUF(SHUFPD, m)
   CASE_MASKZ_SHUF(SHUFPD, r)
   CASE_MASKZ_SHUF(SHUFPS, m)
@@ -339,6 +343,10 @@
   CASE_MASK_UNPCK(UNPCKLPS, r)
   CASE_MASK_SHUF(PALIGNR, r)
   CASE_MASK_SHUF(PALIGNR, m)
+  CASE_MASK_SHUF(ALIGNQ, r)
+  CASE_MASK_SHUF(ALIGNQ, m)
+  CASE_MASK_SHUF(ALIGND, r)
+  CASE_MASK_SHUF(ALIGND, m)
   CASE_MASK_SHUF(SHUFPD, m)
   CASE_MASK_SHUF(SHUFPD, r)
   CASE_MASK_SHUF(SHUFPS, m)
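Note: the CASE_MASKZ_SHUF / CASE_MASK_SHUF entries above are token-pasting helpers defined near the top of X86InstComments.cpp; each fans out to the 512/256/128-bit zero-masked (kz) or merge-masked (k) opcode variants so that every masked form of an instruction funnels into the same comment-printing case. A rough sketch of the scheme, reconstructed for illustration (the exact definitions live in the file, not in this diff):

// Hypothetical reconstruction: pastes the instruction name, width suffix,
// and operand-form tokens into the masked opcode enum names.
#define CASE_MASK_INS_COMMON(Inst, Suffix, src)  \
  case X86::V##Inst##Suffix##src##k:
#define CASE_MASK_SHUF(Inst, src)                \
  CASE_MASK_INS_COMMON(Inst, Z, r##src##i)       \
  CASE_MASK_INS_COMMON(Inst, Z256, r##src##i)    \
  CASE_MASK_INS_COMMON(Inst, Z128, r##src##i)

Under that reading, CASE_MASK_SHUF(ALIGNQ, r) expands to the case labels for VALIGNQZrrik, VALIGNQZ256rrik, and VALIGNQZ128rrik.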
@@ -619,6 +627,42 @@
                        ShuffleMask);
     break;
 
+  CASE_AVX512_INS_COMMON(ALIGNQ, Z, rri)
+  CASE_AVX512_INS_COMMON(ALIGNQ, Z256, rri)
+  CASE_AVX512_INS_COMMON(ALIGNQ, Z128, rri)
+    Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
+    RegForm = true;
+    LLVM_FALLTHROUGH;
+
+  CASE_AVX512_INS_COMMON(ALIGNQ, Z, rmi)
+  CASE_AVX512_INS_COMMON(ALIGNQ, Z256, rmi)
+  CASE_AVX512_INS_COMMON(ALIGNQ, Z128, rmi)
+    Src2Name = getRegName(MI->getOperand(NumOperands-(RegForm?3:7)).getReg());
+    DestName = getRegName(MI->getOperand(0).getReg());
+    if (MI->getOperand(NumOperands - 1).isImm())
+      DecodeVALIGNMask(getRegOperandVectorVT(MI, MVT::i64, 0),
+                       MI->getOperand(NumOperands - 1).getImm(),
+                       ShuffleMask);
+    break;
+
+  CASE_AVX512_INS_COMMON(ALIGND, Z, rri)
+  CASE_AVX512_INS_COMMON(ALIGND, Z256, rri)
+  CASE_AVX512_INS_COMMON(ALIGND, Z128, rri)
+    Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
+    RegForm = true;
+    LLVM_FALLTHROUGH;
+
+  CASE_AVX512_INS_COMMON(ALIGND, Z, rmi)
+  CASE_AVX512_INS_COMMON(ALIGND, Z256, rmi)
+  CASE_AVX512_INS_COMMON(ALIGND, Z128, rmi)
+    Src2Name = getRegName(MI->getOperand(NumOperands-(RegForm?3:7)).getReg());
+    DestName = getRegName(MI->getOperand(0).getReg());
+    if (MI->getOperand(NumOperands - 1).isImm())
+      DecodeVALIGNMask(getRegOperandVectorVT(MI, MVT::i32, 0),
+                       MI->getOperand(NumOperands - 1).getImm(),
+                       ShuffleMask);
+    break;
+
   CASE_SHUF(PSHUFD, ri)
     Src1Name = getRegName(MI->getOperand(NumOperands - 2).getReg());
     LLVM_FALLTHROUGH;
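Note on the operand arithmetic in the new cases: the immediate is always the last MI operand, and an x86 memory reference expands to five MachineInstr operands (base, scale, index, displacement, segment). Counting from the end of the operand list therefore finds the first source register 3 operands back in the register form but 7 back in the memory form, which is what NumOperands-(RegForm?3:7) encodes; counting from the end also keeps the same expression valid for the masked variants, which carry extra pass-through and mask-register operands at the front.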
--- a/lib/Target/X86/Utils/X86ShuffleDecode.cpp
+++ b/lib/Target/X86/Utils/X86ShuffleDecode.cpp
@@ -148,6 +148,16 @@
       ShuffleMask.push_back(Base + l);
     }
   }
+}
+
+void DecodeVALIGNMask(MVT VT, unsigned Imm,
+                      SmallVectorImpl<int> &ShuffleMask) {
+  int NumElts = VT.getVectorNumElements();
+  // Not all bits of the immediate are used so mask it.
+  assert(isPowerOf2_32(NumElts) && "NumElts should be power of 2");
+  Imm = Imm & (NumElts - 1);
+  for (int i = 0; i != NumElts; ++i)
+    ShuffleMask.push_back(i + Imm);
 }
 
 /// DecodePSHUFMask - This decodes the shuffle masks for pshufw, pshufd, and vpermilp*.
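To make the decode concrete, here is a minimal standalone sketch (plain C++, no LLVM headers; the real code uses MVT and SmallVectorImpl<int>) that reproduces what DecodeVALIGNMask computes and prints the mask that becomes the asm comment:

#include <cassert>
#include <cstdio>
#include <vector>

// Standalone re-implementation of the decode above: VALIGN concatenates the
// two sources and shifts right by Imm elements, so lane i of the result is
// element (i + Imm) of the 2*NumElts-wide concatenation.
static std::vector<int> decodeVALIGN(int NumElts, unsigned Imm) {
  assert((NumElts & (NumElts - 1)) == 0 && "NumElts should be power of 2");
  Imm &= NumElts - 1; // only log2(NumElts) bits of the immediate are used
  std::vector<int> Mask;
  for (int i = 0; i != NumElts; ++i)
    Mask.push_back(i + Imm);
  return Mask;
}

int main() {
  // valignq $2 with v8i64: indices 2..9. Indices < 8 select the first
  // source (zmm1 in AT&T operand order), indices >= 8 wrap into the second
  // (zmm0), printing as: zmm0 = zmm1[2,3,4,5,6,7],zmm0[0,1]
  for (int M : decodeVALIGN(8, 2))
    std::printf("%d ", M);
  std::printf("\n");
}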
--- a/lib/Target/X86/Utils/X86ShuffleDecode.h
+++ b/lib/Target/X86/Utils/X86ShuffleDecode.h
@@ -53,6 +53,8 @@
 void DecodePSRLDQMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
 
 void DecodePALIGNRMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
+
+void DecodeVALIGNMask(MVT VT, unsigned Imm, SmallVectorImpl<int> &ShuffleMask);
 
 /// Decodes the shuffle masks for pshufd/pshufw/vpermilpd/vpermilps.
 /// VT indicates the type of the vector allowing it to handle different
--- a/test/CodeGen/X86/avx512-intrinsics.ll
+++ b/test/CodeGen/X86/avx512-intrinsics.ll
@@ -845,7 +845,7 @@
 define <8 x i64> @test_valign_q(<8 x i64> %a, <8 x i64> %b) {
 ; CHECK-LABEL: test_valign_q:
 ; CHECK: ## BB#0:
-; CHECK-NEXT: valignq $2, %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: valignq {{.*#+}} zmm0 = zmm1[2,3,4,5,6,7],zmm0[0,1]
 ; CHECK-NEXT: retq
   %res = call <8 x i64> @llvm.x86.avx512.mask.valign.q.512(<8 x i64> %a, <8 x i64> %b, i32 2, <8 x i64> zeroinitializer, i8 -1)
   ret <8 x i64> %res
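Worked example for the new check line: with v8i64 and an immediate of 2, DecodeVALIGNMask produces indices [2..9] over the 16-element concatenation of the two sources; indices 2-7 fall in the first source and 8-9 wrap into the second, which is exactly the zmm1[2,3,4,5,6,7],zmm0[0,1] comment the test now matches.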
@@ -855,7 +855,7 @@
 ; CHECK-LABEL: test_mask_valign_q:
 ; CHECK: ## BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: valignq $2, %zmm1, %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: valignq {{.*#+}} zmm2 {%k1} = zmm1[2,3,4,5,6,7],zmm0[0,1]
 ; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
 ; CHECK-NEXT: retq
   %res = call <8 x i64> @llvm.x86.avx512.mask.valign.q.512(<8 x i64> %a, <8 x i64> %b, i32 2, <8 x i64> %src, i8 %mask)
@@ -868,7 +868,7 @@
 ; CHECK-LABEL: test_maskz_valign_d:
 ; CHECK: ## BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: valignd $5, %zmm1, %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: valignd {{.*#+}} zmm0 {%k1} {z} = zmm1[5,6,7,8,9,10,11,12,13,14,15],zmm0[0,1,2,3,4]
 ; CHECK-NEXT: retq
   %res = call <16 x i32> @llvm.x86.avx512.mask.valign.d.512(<16 x i32> %a, <16 x i32> %b, i32 5, <16 x i32> zeroinitializer, i16 %mask)
   ret <16 x i32> %res
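Note: the {{.*#+}} in the updated checks is a FileCheck regex that skips the immediate and register operands and re-anchors on the '#' comment marker, so these tests now verify the human-readable shuffle comment rather than the raw operand list. For the maskz case, 5 & (16 - 1) = 5 gives indices [5..20], i.e. zmm1[5..15] followed by zmm0[0..4].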
--- a/test/CodeGen/X86/avx512vl-intrinsics.ll
+++ b/test/CodeGen/X86/avx512vl-intrinsics.ll
@@ -3945,8 +3945,11 @@
 ; CHECK: ## BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: valignd $22, %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x09,0x03,0xd1,0x16]
+; CHECK-NEXT: ## xmm2 {%k1} = xmm1[2,3],xmm0[0,1]
 ; CHECK-NEXT: valignd $22, %xmm1, %xmm0, %xmm3 {%k1} {z} ## encoding: [0x62,0xf3,0x7d,0x89,0x03,0xd9,0x16]
+; CHECK-NEXT: ## xmm3 {%k1} {z} = xmm1[2,3],xmm0[0,1]
 ; CHECK-NEXT: valignd $22, %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0x7d,0x08,0x03,0xc1,0x16]
+; CHECK-NEXT: ## xmm0 = xmm1[2,3],xmm0[0,1]
 ; CHECK-NEXT: vpaddd %xmm0, %xmm2, %xmm0 ## encoding: [0x62,0xf1,0x6d,0x08,0xfe,0xc0]
 ; CHECK-NEXT: vpaddd %xmm3, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0xfe,0xc3]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
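These VL tests deliberately pass an out-of-range immediate ($22) to exercise the Imm & (NumElts - 1) masking in the decoder: with 4 dword elements, 22 & 3 = 2, so the comment shows xmm1[2,3],xmm0[0,1]; with 8 dword elements in the ymm case below, 22 & 7 = 6.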
@@ -3965,7 +3968,9 @@
 ; CHECK: ## BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: valignd $22, %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0x7d,0x29,0x03,0xd1,0x16]
+; CHECK-NEXT: ## ymm2 {%k1} = ymm1[6,7],ymm0[0,1,2,3,4,5]
 ; CHECK-NEXT: valignd $22, %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf3,0x7d,0x28,0x03,0xc1,0x16]
+; CHECK-NEXT: ## ymm0 = ymm1[6,7],ymm0[0,1,2,3,4,5]
 ; CHECK-NEXT: vpaddd %ymm0, %ymm2, %ymm0 ## encoding: [0x62,0xf1,0x6d,0x28,0xfe,0xc0]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
   %res = call <8 x i32> @llvm.x86.avx512.mask.valign.d.256(<8 x i32> %x0, <8 x i32> %x1, i32 22, <8 x i32> %x3, i8 %x4)
@@ -3981,7 +3986,9 @@
 ; CHECK: ## BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: valignq $22, %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x09,0x03,0xd1,0x16]
+; CHECK-NEXT: ## xmm2 {%k1} = xmm1[0,1]
 ; CHECK-NEXT: valignq $22, %xmm1, %xmm0, %xmm0 ## encoding: [0x62,0xf3,0xfd,0x08,0x03,0xc1,0x16]
+; CHECK-NEXT: ## xmm0 = xmm1[0,1]
 ; CHECK-NEXT: vpaddq %xmm0, %xmm2, %xmm0 ## encoding: [0x62,0xf1,0xed,0x08,0xd4,0xc0]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
   %res = call <2 x i64> @llvm.x86.avx512.mask.valign.q.128(<2 x i64> %x0, <2 x i64> %x1, i32 22, <2 x i64> %x3, i8 %x4)
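Edge case: for v2i64, 22 & 1 = 0, so the shuffle is the identity over the first source and the comment degenerates to xmm2 {%k1} = xmm1[0,1], with no contribution from xmm0 at all.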
@@ -3997,7 +4004,9 @@
 ; CHECK: ## BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1 ## encoding: [0xc5,0xf8,0x92,0xcf]
 ; CHECK-NEXT: valignq $22, %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf3,0xfd,0x29,0x03,0xd1,0x16]
+; CHECK-NEXT: ## ymm2 {%k1} = ymm1[2,3],ymm0[0,1]
 ; CHECK-NEXT: valignq $22, %ymm1, %ymm0, %ymm0 ## encoding: [0x62,0xf3,0xfd,0x28,0x03,0xc1,0x16]
+; CHECK-NEXT: ## ymm0 = ymm1[2,3],ymm0[0,1]
 ; CHECK-NEXT: vpaddq %ymm0, %ymm2, %ymm0 ## encoding: [0x62,0xf1,0xed,0x28,0xd4,0xc0]
 ; CHECK-NEXT: retq ## encoding: [0xc3]
   %res = call <4 x i64> @llvm.x86.avx512.mask.valign.q.256(<4 x i64> %x0, <4 x i64> %x1, i32 22, <4 x i64> %x3, i8 %x4)