llvm / 3160d05 (llvm.org GIT mirror)

[X86][SSE] Add support for combining AND bitmasks to shuffles.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@288365 91177308-0d34-0410-b5e6-96231b3b80d8

Simon Pilgrim · 3 years ago
7 changed files with 102 additions and 53 deletions.
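
The idea of the combine, in isolation: a constant AND bitmask whose bytes are each all-ones or all-zero is equivalent to a byte shuffle that keeps or zeroes each lane, so it can be fed into the existing recursive shuffle combining (see the X86ISelLowering.cpp hunk below). The following standalone C++ sketch is illustrative only; maskToShuffle is a hypothetical helper, not LLVM's API.

// Illustrative sketch only: maskToShuffle is a hypothetical helper, not
// LLVM's API. It shows the translation this combine relies on: an AND whose
// constant mask bytes are each 0xFF (keep) or 0x00 (clear) is equivalent to
// a byte shuffle selecting either the source byte or zero.
#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

std::optional<std::vector<int>>
maskToShuffle(const std::vector<std::uint8_t> &MaskBytes) {
  std::vector<int> ShuffleMask;
  ShuffleMask.reserve(MaskBytes.size());
  for (std::size_t I = 0; I != MaskBytes.size(); ++I) {
    if (MaskBytes[I] == 0xFF)
      ShuffleMask.push_back(static_cast<int>(I)); // keep source byte I
    else if (MaskBytes[I] == 0x00)
      ShuffleMask.push_back(-1); // sentinel: this byte becomes zero
    else
      return std::nullopt; // a partial byte mask cannot become a shuffle
  }
  return ShuffleMask;
}
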
lib/Target/X86/X86ISelLowering.cpp
@@ -29888,6 +29888,17 @@
   SDValue N1 = N->getOperand(1);
   SDLoc DL(N);
 
+  // Attempt to recursively combine a bitmask AND with shuffles.
+  if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
+    SDValue Op(N, 0);
+    SmallVector<int, 1> NonceMask; // Just a placeholder.
+    NonceMask.push_back(0);
+    if (combineX86ShufflesRecursively({Op}, 0, Op, NonceMask,
+                                      /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
+                                      DCI, Subtarget))
+      return SDValue(); // This routine will use CombineTo to replace N.
+  }
+
   // Create BEXTR instructions
   // BEXTR is ((X >> imm) & (2**size-1))
   if (VT != MVT::i32 && VT != MVT::i64)
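
A hypothetical driver for the sketch above, using the AND mask implied by _clearupper2xi64a from the first test hunk below (keep the low 32 bits of each <2 x i64> element). The "byte or zero" mask it prints is the kind of mask the shuffle lowering then turns into the vpblendw/vpblendd seen in the updated checks.

// Hypothetical driver, building on the maskToShuffle sketch above.
#include <cstdio>

int main() {
  // AND mask for _clearupper2xi64a: keep bytes 0-3 and 8-11, zero the rest.
  std::vector<std::uint8_t> Mask = {0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00,
                                    0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00};
  if (auto SM = maskToShuffle(Mask)) // defined in the sketch above
    for (int Idx : *SM)
      std::printf("%d ", Idx); // prints: 0 1 2 3 -1 -1 -1 -1 8 9 10 11 -1 -1 -1 -1
  std::printf("\n");
  return 0;
}
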
test/CodeGen/X86/clear_upper_vector_element_bits.ll
@@ -12,10 +12,17 @@
 ; SSE-NEXT: andps {{.*}}(%rip), %xmm0
 ; SSE-NEXT: retq
 ;
-; AVX-LABEL: _clearupper2xi64a:
-; AVX: # BB#0:
-; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX1-LABEL: _clearupper2xi64a:
+; AVX1: # BB#0:
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _clearupper2xi64a:
+; AVX2: # BB#0:
+; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
+; AVX2-NEXT: retq
 %x0 = extractelement <2 x i64> %0, i32 0
 %x1 = extractelement <2 x i64> %0, i32 1
 %trunc0 = trunc i64 %x0 to i32
@@ -35,7 +42,8 @@
 ;
 ; AVX1-LABEL: _clearupper4xi32a:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: _clearupper4xi32a:
@@ -15,8 +15,7 @@
 ; X32-NEXT: shrl $14, %eax
 ; X32-NEXT: movzbl %al, %eax
 ; X32-NEXT: vmovd %eax, %xmm0
-; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]
-; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
+; X32-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; X32-NEXT: vpextrd $1, %xmm0, %ebp
 ; X32-NEXT: xorl %ecx, %ecx
 ; X32-NEXT: vmovd %xmm0, %esi
@@ -54,8 +53,7 @@
 ; X64-NEXT: shrl $14, %eax
 ; X64-NEXT: movzbl %al, %eax
 ; X64-NEXT: vmovd %eax, %xmm0
-; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,0,1,1]
-; X64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
+; X64-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
 ; X64-NEXT: vpextrd $1, %xmm0, %r8d
 ; X64-NEXT: xorl %esi, %esi
 ; X64-NEXT: vmovd %xmm0, %r9d
test/CodeGen/X86/vec_int_to_fp.ll
@@ -572,7 +572,8 @@
 ;
 ; AVX1-LABEL: uitofp_4i32_to_2f64:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; AVX1-NEXT: vcvtdq2pd %xmm1, %ymm1
 ; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
 ; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
@@ -872,7 +873,8 @@
 ;
 ; AVX1-LABEL: uitofp_4i32_to_4f64:
 ; AVX1: # BB#0:
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; AVX1-NEXT: vcvtdq2pd %xmm1, %ymm1
 ; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
 ; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
@@ -3255,7 +3257,8 @@
 ; AVX1-LABEL: uitofp_load_4i32_to_4f64:
 ; AVX1: # BB#0:
 ; AVX1-NEXT: vmovdqa (%rdi), %xmm0
-; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm1
+; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; AVX1-NEXT: vcvtdq2pd %xmm1, %ymm1
 ; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
 ; AVX1-NEXT: vcvtdq2pd %xmm0, %ymm0
test/CodeGen/X86/vec_uint_to_fp-fastmath.ll
@@ -10,11 +10,11 @@
 ; RUN: llc < %s -mtriple=x86_64 -enable-unsafe-fp-math -mattr=+avx512vl \
 ; RUN: | FileCheck %s --check-prefix=CHECK --check-prefix=AVX512VL
 
-; CST: [[MASKCSTADDR:.LCPI[0-9_]+]]:
-; CST-NEXT: .long 65535 # 0xffff
-; CST-NEXT: .long 65535 # 0xffff
-; CST-NEXT: .long 65535 # 0xffff
-; CST-NEXT: .long 65535 # 0xffff
+; SSE2: [[MASKCSTADDR:.LCPI[0-9_]+]]:
+; SSE2-NEXT: .long 65535 # 0xffff
+; SSE2-NEXT: .long 65535 # 0xffff
+; SSE2-NEXT: .long 65535 # 0xffff
+; SSE2-NEXT: .long 65535 # 0xffff
 
 ; CST: [[FPMASKCSTADDR:.LCPI[0-9_]+]]:
 ; CST-NEXT: .long 1199570944 # float 65536
@@ -29,20 +29,32 @@
 ; AVX2-NEXT: .long 65535 # 0xffff
 
 define <4 x float> @test_uitofp_v4i32_to_v4f32(<4 x i32> %arg) {
-; SSE-LABEL: test_uitofp_v4i32_to_v4f32:
-; SSE: # BB#0:
-; SSE-NEXT: movaps {{.*#+}} xmm1 = [65535,65535,65535,65535]
-; SSE-NEXT: andps %xmm0, %xmm1
-; SSE-NEXT: cvtdq2ps %xmm1, %xmm1
-; SSE-NEXT: psrld $16, %xmm0
-; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
-; SSE-NEXT: mulps [[FPMASKCSTADDR]](%rip), %xmm0
-; SSE-NEXT: addps %xmm1, %xmm0
-; SSE-NEXT: retq
+; SSE2-LABEL: test_uitofp_v4i32_to_v4f32:
+; SSE2: # BB#0:
+; SSE2-NEXT: movaps {{.*#+}} xmm1 = [65535,65535,65535,65535]
+; SSE2-NEXT: andps %xmm0, %xmm1
+; SSE2-NEXT: cvtdq2ps %xmm1, %xmm1
+; SSE2-NEXT: psrld $16, %xmm0
+; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
+; SSE2-NEXT: mulps [[FPMASKCSTADDR]](%rip), %xmm0
+; SSE2-NEXT: addps %xmm1, %xmm0
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_uitofp_v4i32_to_v4f32:
+; SSE41: # BB#0:
+; SSE41-NEXT: pxor %xmm1, %xmm1
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
+; SSE41-NEXT: cvtdq2ps %xmm1, %xmm1
+; SSE41-NEXT: psrld $16, %xmm0
+; SSE41-NEXT: cvtdq2ps %xmm0, %xmm0
+; SSE41-NEXT: mulps [[FPMASKCSTADDR]](%rip), %xmm0
+; SSE41-NEXT: addps %xmm1, %xmm0
+; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: test_uitofp_v4i32_to_v4f32:
 ; AVX: # BB#0:
-; AVX-NEXT: vandps [[MASKCSTADDR]](%rip), %xmm0, %xmm1
+; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; AVX-NEXT: vpblendw {{.*#+}} xmm1 = xmm0[0],xmm1[1],xmm0[2],xmm1[3],xmm0[4],xmm1[5],xmm0[6],xmm1[7]
 ; AVX-NEXT: vcvtdq2ps %xmm1, %xmm1
 ; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
 ; AVX-NEXT: vcvtdq2ps %xmm0, %xmm0
@@ -96,25 +108,45 @@
 ; AVX2-NEXT: .long 65535 # 0xffff
 
 define <8 x float> @test_uitofp_v8i32_to_v8f32(<8 x i32> %arg) {
-; SSE-LABEL: test_uitofp_v8i32_to_v8f32:
-; SSE: # BB#0:
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: psrld $16, %xmm2
-; SSE-NEXT: cvtdq2ps %xmm2, %xmm2
-; SSE-NEXT: movaps {{.*#+}} xmm3 = [6.553600e+04,6.553600e+04,6.553600e+04,6.553600e+04]
-; SSE-NEXT: mulps %xmm3, %xmm2
-; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535]
-; SSE-NEXT: pand %xmm4, %xmm0
-; SSE-NEXT: cvtdq2ps %xmm0, %xmm0
-; SSE-NEXT: addps %xmm2, %xmm0
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrld $16, %xmm2
-; SSE-NEXT: cvtdq2ps %xmm2, %xmm2
-; SSE-NEXT: mulps %xmm3, %xmm2
-; SSE-NEXT: pand %xmm4, %xmm1
-; SSE-NEXT: cvtdq2ps %xmm1, %xmm1
-; SSE-NEXT: addps %xmm2, %xmm1
-; SSE-NEXT: retq
+; SSE2-LABEL: test_uitofp_v8i32_to_v8f32:
+; SSE2: # BB#0:
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: psrld $16, %xmm2
+; SSE2-NEXT: cvtdq2ps %xmm2, %xmm2
+; SSE2-NEXT: movaps {{.*#+}} xmm3 = [6.553600e+04,6.553600e+04,6.553600e+04,6.553600e+04]
+; SSE2-NEXT: mulps %xmm3, %xmm2
+; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,65535]
+; SSE2-NEXT: pand %xmm4, %xmm0
+; SSE2-NEXT: cvtdq2ps %xmm0, %xmm0
+; SSE2-NEXT: addps %xmm2, %xmm0
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: psrld $16, %xmm2
+; SSE2-NEXT: cvtdq2ps %xmm2, %xmm2
+; SSE2-NEXT: mulps %xmm3, %xmm2
+; SSE2-NEXT: pand %xmm4, %xmm1
+; SSE2-NEXT: cvtdq2ps %xmm1, %xmm1
+; SSE2-NEXT: addps %xmm2, %xmm1
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: test_uitofp_v8i32_to_v8f32:
+; SSE41: # BB#0:
+; SSE41-NEXT: movdqa %xmm0, %xmm2
+; SSE41-NEXT: psrld $16, %xmm2
+; SSE41-NEXT: cvtdq2ps %xmm2, %xmm2
+; SSE41-NEXT: movaps {{.*#+}} xmm3 = [6.553600e+04,6.553600e+04,6.553600e+04,6.553600e+04]
+; SSE41-NEXT: mulps %xmm3, %xmm2
+; SSE41-NEXT: pxor %xmm4, %xmm4
+; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
+; SSE41-NEXT: cvtdq2ps %xmm0, %xmm0
+; SSE41-NEXT: addps %xmm2, %xmm0
+; SSE41-NEXT: movdqa %xmm1, %xmm2
+; SSE41-NEXT: psrld $16, %xmm2
+; SSE41-NEXT: cvtdq2ps %xmm2, %xmm2
+; SSE41-NEXT: mulps %xmm3, %xmm2
+; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
+; SSE41-NEXT: cvtdq2ps %xmm1, %xmm1
+; SSE41-NEXT: addps %xmm2, %xmm1
+; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: test_uitofp_v8i32_to_v8f32:
 ; AVX: # BB#0:
test/CodeGen/X86/vector-shuffle-combining-avx2.ll
@@ -86,14 +86,12 @@
 define <32 x i8> @combine_pshufb_and(<32 x i8> %a0) {
 ; X32-LABEL: combine_pshufb_and:
 ; X32: # BB#0:
-; X32-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1],zero,zero,zero,zero,zero,zero,ymm0[8,9],zero,zero,zero,zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,zero,zero,ymm0[24,25],zero,zero,zero,zero,zero,zero
-; X32-NEXT: vpand {{\.LCPI.*}}, %ymm0, %ymm0
+; X32-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: combine_pshufb_and:
 ; X64: # BB#0:
-; X64-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1],zero,zero,zero,zero,zero,zero,ymm0[8,9],zero,zero,zero,zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,zero,zero,ymm0[24,25],zero,zero,zero,zero,zero,zero
-; X64-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; X64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
 ; X64-NEXT: retq
 %1 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %a0, <32 x i8> )
 %2 = shufflevector <32 x i8> %1, <32 x i8> zeroinitializer, <32 x i32>
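
The combine_pshufb_and tests exercise the fold in the opposite direction: a pshufb followed by an AND-representable zeroing shuffle collapses into a single masking operation (vandps/andps in the new checks). A sketch of the mask composition involved, under the same assumptions as above (composeWithAndMask is hypothetical, not LLVM's API):

// Hypothetical sketch, continuing the assumptions above: compose an existing
// byte-shuffle mask with an AND-derived keep/clear mask. A lane survives only
// if the AND keeps that byte; re-matching the composed mask can then produce
// a simpler op, which for combine_pshufb_and is a single AND.
std::vector<int> composeWithAndMask(const std::vector<int> &ShuffleMask,
                                    const std::vector<std::uint8_t> &MaskBytes) {
  std::vector<int> Composed(ShuffleMask.size(), -1); // default: byte is zeroed
  for (std::size_t I = 0; I != ShuffleMask.size(); ++I)
    if (MaskBytes[I] == 0xFF)       // the AND keeps this byte...
      Composed[I] = ShuffleMask[I]; // ...so the shuffle's choice survives
  return Composed;
}
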
test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
@@ -298,8 +298,7 @@
 define <16 x i8> @combine_pshufb_and(<16 x i8> %a0) {
 ; SSSE3-LABEL: combine_pshufb_and:
 ; SSSE3: # BB#0:
-; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1],zero,zero,zero,zero,zero,zero,xmm0[8,9],zero,zero,zero,zero,zero,zero
-; SSSE3-NEXT: pand {{.*}}(%rip), %xmm0
+; SSSE3-NEXT: andps {{.*}}(%rip), %xmm0
 ; SSSE3-NEXT: retq
 ;
 ; SSE41-LABEL: combine_pshufb_and: