commit fd35058 (git-svn r224429) — author: Michael Kuperstein

[DAGCombine] Slightly improve lowering of BUILD_VECTOR into a shuffle.

This handles the case of a BUILD_VECTOR being constructed out of elements
extracted from a vector twice the size of the result vector. Previously
this was always scalarized. Now, we try to construct a shuffle node that
feeds on extract_subvectors.

This fixes PR15872 and provides a partial fix for PR21711.

Differential Revision: http://reviews.llvm.org/D6678

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@224429 91177308-0d34-0410-b5e6-96231b3b80d8

6 changed files with 156 additions and 11 deletions.
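In DAG terms, the combine replaces per-element scalarization with a subvector split followed by a shuffle. A rough before/after sketch for the v8f32 -> v4f32 case (node spelling approximated for illustration; this is not compiler output):

  // Before: a BUILD_VECTOR of scalar extracts, previously scalarized.
  //   v4f32 = BUILD_VECTOR (extract_vector_elt %v8, 4),
  //                        (extract_vector_elt %v8, 5),
  //                        (extract_vector_elt %v8, 6),
  //                        (extract_vector_elt %v8, 7)
  // After: split the wide input into halves and shuffle them.
  //   %lo   = extract_subvector %v8, 0       // low  v4f32 half
  //   %hi   = extract_subvector %v8, 4       // high v4f32 half
  //   v4f32 = vector_shuffle %lo, %hi, <4, 5, 6, 7>   // selects %hi[0..3]
  // On AVX, the high-half case then selects to a single vextractf128.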
                                              Type *Ty) const {
     return false;
   }
+
+  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
+  /// with this index. This is needed because EXTRACT_SUBVECTOR usually
+  /// has custom lowering that depends on the index of the first element,
+  /// and only the target knows which lowering is cheap.
+  virtual bool isExtractSubvectorCheap(EVT ResVT, unsigned Index) const {
+    return false;
+  }
+
   //===--------------------------------------------------------------------===//
   // Runtime Library hooks
   //
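The DAGCombiner change below queries this new hook before splitting a too-wide input. As a usage sketch, a target where only the low half is free (say, because it amounts to a subregister copy) might override it along these lines; MyTargetLowering is hypothetical, while isOperationLegalOrCustom and ISD::EXTRACT_SUBVECTOR are existing API:

  // Hypothetical target: only index 0 (the low half) is a cheap extract.
  bool MyTargetLowering::isExtractSubvectorCheap(EVT ResVT,
                                                 unsigned Index) const {
    return isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT) &&
           Index == 0;
  }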
       SDValue ExtVal = Extract.getOperand(1);
       unsigned ExtIndex = cast<ConstantSDNode>(ExtVal)->getZExtValue();
       if (Extract.getOperand(0) == VecIn1) {
-        if (ExtIndex > VT.getVectorNumElements())
-          return SDValue();
-
         Mask.push_back(ExtIndex);
         continue;
       }
  ...
     if (VecIn2.getNode())
       return SDValue();

-    // We only support widening of vectors which are half the size of the
-    // output registers. For example XMM->YMM widening on X86 with AVX.
-    if (VecIn1.getValueType().getSizeInBits()*2 != VT.getSizeInBits())
-      return SDValue();
-
     // If the input vector type has a different base type to the output
     // vector type, bail out.
     if (VecIn1.getValueType().getVectorElementType() !=
         VT.getVectorElementType())
       return SDValue();

-    // Widen the input vector by adding undef values.
-    VecIn1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
-                         VecIn1, DAG.getUNDEF(VecIn1.getValueType()));
+    // If the input vector is too small, widen it.
+    // We only support widening of vectors which are half the size of the
+    // output registers. For example XMM->YMM widening on X86 with AVX.
+    EVT VecInT = VecIn1.getValueType();
+    if (VecInT.getSizeInBits() * 2 == VT.getSizeInBits()) {
+      // Widen the input vector by adding undef values.
+      VecIn1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
+                           VecIn1, DAG.getUNDEF(VecIn1.getValueType()));
+    } else if (VecInT.getSizeInBits() == VT.getSizeInBits() * 2) {
+      // If the input vector is too large, try to split it.
+      if (!TLI.isExtractSubvectorCheap(VT, VT.getVectorNumElements()))
+        return SDValue();
+
+      // Try to replace VecIn1 with two extract_subvectors.
+      // No need to update the masks, they should still be correct.
+      VecIn2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, VecIn1,
+        DAG.getConstant(VT.getVectorNumElements(), TLI.getVectorIdxTy()));
+      VecIn1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, VecIn1,
+        DAG.getConstant(0, TLI.getVectorIdxTy()));
+      UsesZeroVector = false;
+    } else
+      return SDValue();
   }

   if (UsesZeroVector)
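To make the index bookkeeping above concrete, here is a worked example assuming VT = v4i32 and VecIn1 = v8i32 (values chosen for illustration, not taken from the patch):

  // VecInT is 256 bits and VT is 128 bits, so the split branch is taken.
  // With VT.getVectorNumElements() == 4:
  //   VecIn2 = extract_subvector VecIn1, 4   // elements 4..7 (high half)
  //   VecIn1 = extract_subvector VecIn1, 0   // elements 0..3 (low half)
  // A mask entry of 6 already means "shuffle-input element 6", which is
  // VecIn2[6 - 4] = original element 6, so the masks need no rewrite.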
   if (BitSize == 0 || BitSize > 64)
     return false;
   return true;
+}
+
+bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
+                                                unsigned Index) const {
+  if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
+    return false;
+
+  return (Index == 0 || Index == ResVT.getVectorNumElements());
 }

 /// isUndefOrInRange - Return true if Val is undef or if its value falls within
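For this X86 override, the two accepted indices line up with single cheap operations on AVX; for example, with ResVT = v4f32 pulled out of a v8f32 held in a YMM register (my illustration, not part of the patch):

  // Index == 0: the low 128 bits, i.e. just the XMM subregister - free.
  // Index == ResVT.getVectorNumElements() (4 here): the high 128 bits,
  //   a single vextractf128 $1 instruction.
  // Indices in between straddle the 128-bit lanes, so the hook reports
  //   them as not cheap and the combine bails out.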
   bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                          Type *Ty) const override;

+  /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
+  /// with this index.
+  bool isExtractSubvectorCheap(EVT ResVT, unsigned Index) const override;
+
   /// Intel processors have a unified instruction and data cache
   const char * getClearCacheBuiltinName() const override {
     return nullptr; // nothing to do, move along.
+target triple = "x86_64-unknown-unknown"
+
+; RUN: llc < %s -march=x86-64 -mattr=+avx | FileCheck %s
+
+; When extracting multiple consecutive elements from a larger
+; vector into a smaller one, do it efficiently. We should use
+; an EXTRACT_SUBVECTOR node internally rather than a bunch of
+; single element extractions.
+
+; Extracting the low elements only requires using the right kind of store.
+define void @low_v8f32_to_v4f32(<8 x float> %v, <4 x float>* %ptr) {
+  %ext0 = extractelement <8 x float> %v, i32 0
+  %ext1 = extractelement <8 x float> %v, i32 1
+  %ext2 = extractelement <8 x float> %v, i32 2
+  %ext3 = extractelement <8 x float> %v, i32 3
+  %ins0 = insertelement <4 x float> undef, float %ext0, i32 0
+  %ins1 = insertelement <4 x float> %ins0, float %ext1, i32 1
+  %ins2 = insertelement <4 x float> %ins1, float %ext2, i32 2
+  %ins3 = insertelement <4 x float> %ins2, float %ext3, i32 3
+  store <4 x float> %ins3, <4 x float>* %ptr, align 16
+  ret void
+
+; CHECK-LABEL: low_v8f32_to_v4f32
+; CHECK: vmovaps
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+}
+
+; Extracting the high elements requires just one AVX instruction.
+define void @high_v8f32_to_v4f32(<8 x float> %v, <4 x float>* %ptr) {
+  %ext0 = extractelement <8 x float> %v, i32 4
+  %ext1 = extractelement <8 x float> %v, i32 5
+  %ext2 = extractelement <8 x float> %v, i32 6
+  %ext3 = extractelement <8 x float> %v, i32 7
+  %ins0 = insertelement <4 x float> undef, float %ext0, i32 0
+  %ins1 = insertelement <4 x float> %ins0, float %ext1, i32 1
+  %ins2 = insertelement <4 x float> %ins1, float %ext2, i32 2
+  %ins3 = insertelement <4 x float> %ins2, float %ext3, i32 3
+  store <4 x float> %ins3, <4 x float>* %ptr, align 16
+  ret void
+
+; CHECK-LABEL: high_v8f32_to_v4f32
+; CHECK: vextractf128
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+}
+
+; Make sure the element type doesn't alter the codegen. Note that
+; if we were actually using the vector in this function and had
+; AVX2, we should generate vextracti128 (the int version).
+define void @high_v8i32_to_v4i32(<8 x i32> %v, <4 x i32>* %ptr) {
+  %ext0 = extractelement <8 x i32> %v, i32 4
+  %ext1 = extractelement <8 x i32> %v, i32 5
+  %ext2 = extractelement <8 x i32> %v, i32 6
+  %ext3 = extractelement <8 x i32> %v, i32 7
+  %ins0 = insertelement <4 x i32> undef, i32 %ext0, i32 0
+  %ins1 = insertelement <4 x i32> %ins0, i32 %ext1, i32 1
+  %ins2 = insertelement <4 x i32> %ins1, i32 %ext2, i32 2
+  %ins3 = insertelement <4 x i32> %ins2, i32 %ext3, i32 3
+  store <4 x i32> %ins3, <4 x i32>* %ptr, align 16
+  ret void
+
+; CHECK-LABEL: high_v8i32_to_v4i32
+; CHECK: vextractf128
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+}
+
+; Make sure that the element size doesn't alter the codegen.
+define void @high_v4f64_to_v2f64(<4 x double> %v, <2 x double>* %ptr) {
+  %ext0 = extractelement <4 x double> %v, i32 2
+  %ext1 = extractelement <4 x double> %v, i32 3
+  %ins0 = insertelement <2 x double> undef, double %ext0, i32 0
+  %ins1 = insertelement <2 x double> %ins0, double %ext1, i32 1
+  store <2 x double> %ins1, <2 x double>* %ptr, align 16
+  ret void
+
+; CHECK-LABEL: high_v4f64_to_v2f64
+; CHECK: vextractf128
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+}
   ret <4 x i32> %2
 }

+define <4 x i32> @combine_test21(<8 x i32> %a, <4 x i32>* %ptr) {
+; SSE-LABEL: combine_test21:
+; SSE:       # BB#0:
+; SSE-NEXT:    movdqa %xmm0, %xmm2
+; SSE-NEXT:    punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
+; SSE-NEXT:    punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; SSE-NEXT:    movdqa %xmm2,
+; SSE-NEXT:    retq
+;
+; AVX1-LABEL: combine_test21:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vextractf128 $1, %ymm0, %xmm1
+; AVX1-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm0[0],xmm1[0]
+; AVX1-NEXT:    vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; AVX1-NEXT:    movdqa %xmm2,
+; AVX1-NEXT:    vzeroupper
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: combine_test21:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm2 = xmm0[0],xmm1[0]
+; AVX2-NEXT:    vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; AVX2-NEXT:    movdqa %xmm2,
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+  %1 = shufflevector <8 x i32> %a, <8 x i32> %a, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+  %2 = shufflevector <8 x i32> %a, <8 x i32> %a, <4 x i32> <i32 2, i32 3, i32 6, i32 7>
+  store <4 x i32> %1, <4 x i32>* %ptr, align 16
+  ret <4 x i32> %2
+}

 ; Check some negative cases.
 ; FIXME: Do any of these really make sense? Are they redundant with the above tests?