llvm.org GIT mirror: llvm / fad1a9a

[X86][SSE] Some basic tests for variable shuffles

We don't really support non-constant shuffle masks, but these tests are for cases where BUILD_VECTOR is made up from vector extracts (as well as undef/zero scalars).

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@265045 91177308-0d34-0410-b5e6-96231b3b80d8

Author: Simon Pilgrim
2 changed file(s) with 1942 addition(s) and 0 deletion(s).
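For orientation before the full test file: every test below exercises the same pattern, a BUILD_VECTOR whose scalars are variable-index extracts from a source vector, expressed in IR as an extractelement/insertelement chain. A minimal sketch of that pattern (the function name @example_sketch is illustrative, not part of the commit):

define <4 x float> @example_sketch(<4 x float> %x, i32 %i0, i32 %i1) nounwind {
  ; variable indices: there is no constant shuffle mask to select on
  %e0 = extractelement <4 x float> %x, i32 %i0
  %e1 = extractelement <4 x float> %x, i32 %i1
  ; rebuild the result vector lane by lane from the extracted scalars
  %v0 = insertelement <4 x float> undef, float %e0, i32 0
  %v1 = insertelement <4 x float> %v0, float %e1, i32 1
  ret <4 x float> %v1
}

With no constant mask available, current codegen spills %x to the stack and reloads each lane through the index registers, which is exactly what the checked assembly below records.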
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2

;
; Unary shuffle indices from registers
;

define <2 x double> @var_shuffle_v2f64_v2f64_xx_i64(<2 x double> %x, i64 %i0, i64 %i1) nounwind {
; SSE-LABEL: var_shuffle_v2f64_v2f64_xx_i64:
; SSE: # BB#0:
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; SSE-NEXT: retq
;
; AVX-LABEL: var_shuffle_v2f64_v2f64_xx_i64:
; AVX: # BB#0:
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT: retq
%x0 = extractelement <2 x double> %x, i64 %i0
%x1 = extractelement <2 x double> %x, i64 %i1
%r0 = insertelement <2 x double> undef, double %x0, i32 0
%r1 = insertelement <2 x double> %r0, double %x1, i32 1
ret <2 x double> %r1
}

define <2 x i64> @var_shuffle_v2i64_v2i64_xx_i64(<2 x i64> %x, i32 %i0, i32 %i1) nounwind {
; SSE-LABEL: var_shuffle_v2i64_v2i64_xx_i64:
; SSE: # BB#0:
; SSE-NEXT: movslq %edi, %rax
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movslq %esi, %rcx
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: var_shuffle_v2i64_v2i64_xx_i64:
; AVX: # BB#0:
; AVX-NEXT: movslq %edi, %rax
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movslq %esi, %rcx
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
%x0 = extractelement <2 x i64> %x, i32 %i0
%x1 = extractelement <2 x i64> %x, i32 %i1
%r0 = insertelement <2 x i64> undef, i64 %x0, i32 0
%r1 = insertelement <2 x i64> %r0, i64 %x1, i32 1
ret <2 x i64> %r1
}

define <4 x float> @var_shuffle_v4f32_v4f32_xxxx_i32(<4 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE2-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; SSE2: # BB#0:
; SSE2-NEXT: movslq %edi, %rax
; SSE2-NEXT: movslq %esi, %rsi
; SSE2-NEXT: movslq %edx, %rdx
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movslq %ecx, %rcx
; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; SSSE3: # BB#0:
; SSSE3-NEXT: movslq %edi, %rax
; SSSE3-NEXT: movslq %esi, %rsi
; SSSE3-NEXT: movslq %edx, %rdx
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movslq %ecx, %rcx
; SSSE3-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSSE3-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; SSE41: # BB#0:
; SSE41-NEXT: movslq %edi, %rax
; SSE41-NEXT: movslq %esi, %rsi
; SSE41-NEXT: movslq %edx, %rdx
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: movslq %ecx, %rcx
; SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; SSE41-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shuffle_v4f32_v4f32_xxxx_i32:
; AVX: # BB#0:
; AVX-NEXT: movslq %edi, %rax
; AVX-NEXT: movslq %esi, %rsi
; AVX-NEXT: movslq %edx, %rdx
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movslq %ecx, %rcx
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; AVX-NEXT: retq
%x0 = extractelement <4 x float> %x, i32 %i0
%x1 = extractelement <4 x float> %x, i32 %i1
%x2 = extractelement <4 x float> %x, i32 %i2
%x3 = extractelement <4 x float> %x, i32 %i3
%r0 = insertelement <4 x float> undef, float %x0, i32 0
%r1 = insertelement <4 x float> %r0, float %x1, i32 1
%r2 = insertelement <4 x float> %r1, float %x2, i32 2
%r3 = insertelement <4 x float> %r2, float %x3, i32 3
ret <4 x float> %r3
}

define <4 x i32> @var_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE2-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; SSE2: # BB#0:
; SSE2-NEXT: movslq %edi, %rax
; SSE2-NEXT: movslq %esi, %rsi
; SSE2-NEXT: movslq %edx, %rdx
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movslq %ecx, %rcx
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; SSSE3: # BB#0:
; SSSE3-NEXT: movslq %edi, %rax
; SSSE3-NEXT: movslq %esi, %rsi
; SSSE3-NEXT: movslq %edx, %rdx
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movslq %ecx, %rcx
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; SSE41: # BB#0:
; SSE41-NEXT: movslq %edi, %rax
; SSE41-NEXT: movslq %esi, %rsi
; SSE41-NEXT: movslq %edx, %rdx
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: movslq %ecx, %rcx
; SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-NEXT: pinsrd $1, -24(%rsp,%rsi,4), %xmm0
; SSE41-NEXT: pinsrd $2, -24(%rsp,%rdx,4), %xmm0
; SSE41-NEXT: pinsrd $3, -24(%rsp,%rcx,4), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shuffle_v4i32_v4i32_xxxx_i32:
; AVX: # BB#0:
; AVX-NEXT: movslq %edi, %rax
; AVX-NEXT: movslq %esi, %rsi
; AVX-NEXT: movslq %edx, %rdx
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movslq %ecx, %rcx
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vpinsrd $1, -24(%rsp,%rsi,4), %xmm0, %xmm0
; AVX-NEXT: vpinsrd $2, -24(%rsp,%rdx,4), %xmm0, %xmm0
; AVX-NEXT: vpinsrd $3, -24(%rsp,%rcx,4), %xmm0, %xmm0
; AVX-NEXT: retq
%x0 = extractelement <4 x i32> %x, i32 %i0
%x1 = extractelement <4 x i32> %x, i32 %i1
%x2 = extractelement <4 x i32> %x, i32 %i2
%x3 = extractelement <4 x i32> %x, i32 %i3
%r0 = insertelement <4 x i32> undef, i32 %x0, i32 0
%r1 = insertelement <4 x i32> %r0, i32 %x1, i32 1
%r2 = insertelement <4 x i32> %r1, i32 %x2, i32 2
%r3 = insertelement <4 x i32> %r2, i32 %x3, i32 3
ret <4 x i32> %r3
}

define <8 x i16> @var_shuffle_v8i16_v8i16_xxxxxxxx_i16(<8 x i16> %x, i16 %i0, i16 %i1, i16 %i2, i16 %i3, i16 %i4, i16 %i5, i16 %i6, i16 %i7) nounwind {
; SSE2-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; SSE2: # BB#0:
; SSE2-NEXT: movswq %di, %rax
; SSE2-NEXT: movswq %si, %rsi
; SSE2-NEXT: movswq %dx, %rdx
; SSE2-NEXT: movswq %cx, %r10
; SSE2-NEXT: movswq %r8w, %r11
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movswq %r9w, %r8
; SSE2-NEXT: movswq {{[0-9]+}}(%rsp), %rcx
; SSE2-NEXT: movswq {{[0-9]+}}(%rsp), %rdi
; SSE2-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
; SSE2-NEXT: movzwl -24(%rsp,%rdi,2), %edi
; SSE2-NEXT: movzwl -24(%rsp,%rax,2), %eax
; SSE2-NEXT: movzwl -24(%rsp,%rsi,2), %esi
; SSE2-NEXT: movd %ecx, %xmm0
; SSE2-NEXT: movzwl -24(%rsp,%rdx,2), %ecx
; SSE2-NEXT: movd %ecx, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: movzwl -24(%rsp,%r10,2), %ecx
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: movzwl -24(%rsp,%r11,2), %eax
; SSE2-NEXT: movd %eax, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: movd %edi, %xmm1
; SSE2-NEXT: movd %ecx, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE2-NEXT: movd %esi, %xmm1
; SSE2-NEXT: movzwl -24(%rsp,%r8,2), %eax
; SSE2-NEXT: movd %eax, %xmm3
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; SSSE3: # BB#0:
; SSSE3-NEXT: movswq %di, %rax
; SSSE3-NEXT: movswq %si, %rsi
; SSSE3-NEXT: movswq %dx, %rdx
; SSSE3-NEXT: movswq %cx, %r10
; SSSE3-NEXT: movswq %r8w, %r11
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movswq %r9w, %r8
; SSSE3-NEXT: movswq {{[0-9]+}}(%rsp), %rcx
; SSSE3-NEXT: movswq {{[0-9]+}}(%rsp), %rdi
; SSSE3-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
; SSSE3-NEXT: movzwl -24(%rsp,%rdi,2), %edi
; SSSE3-NEXT: movzwl -24(%rsp,%rax,2), %eax
; SSSE3-NEXT: movzwl -24(%rsp,%rsi,2), %esi
; SSSE3-NEXT: movd %ecx, %xmm0
; SSSE3-NEXT: movzwl -24(%rsp,%rdx,2), %ecx
; SSSE3-NEXT: movd %ecx, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSSE3-NEXT: movzwl -24(%rsp,%r10,2), %ecx
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: movzwl -24(%rsp,%r11,2), %eax
; SSSE3-NEXT: movd %eax, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: movd %edi, %xmm1
; SSSE3-NEXT: movd %ecx, %xmm2
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSSE3-NEXT: movd %esi, %xmm1
; SSSE3-NEXT: movzwl -24(%rsp,%r8,2), %eax
; SSSE3-NEXT: movd %eax, %xmm3
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; SSE41: # BB#0:
; SSE41-NEXT: pushq %rbx
; SSE41-NEXT: movswq %di, %rax
; SSE41-NEXT: movswq %si, %rbx
; SSE41-NEXT: movswq %dx, %r11
; SSE41-NEXT: movswq %cx, %r10
; SSE41-NEXT: movswq %r8w, %rdi
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: movswq %r9w, %rcx
; SSE41-NEXT: movswq {{[0-9]+}}(%rsp), %rdx
; SSE41-NEXT: movswq {{[0-9]+}}(%rsp), %rsi
; SSE41-NEXT: movzwl -16(%rsp,%rdx,2), %edx
; SSE41-NEXT: movzwl -16(%rsp,%rsi,2), %esi
; SSE41-NEXT: movzwl -16(%rsp,%rax,2), %eax
; SSE41-NEXT: movd %eax, %xmm0
; SSE41-NEXT: pinsrw $1, -16(%rsp,%rbx,2), %xmm0
; SSE41-NEXT: pinsrw $2, -16(%rsp,%r11,2), %xmm0
; SSE41-NEXT: pinsrw $3, -16(%rsp,%r10,2), %xmm0
; SSE41-NEXT: pinsrw $4, -16(%rsp,%rdi,2), %xmm0
; SSE41-NEXT: pinsrw $5, -16(%rsp,%rcx,2), %xmm0
; SSE41-NEXT: pinsrw $6, %edx, %xmm0
; SSE41-NEXT: pinsrw $7, %esi, %xmm0
; SSE41-NEXT: popq %rbx
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shuffle_v8i16_v8i16_xxxxxxxx_i16:
; AVX: # BB#0:
; AVX-NEXT: pushq %r14
; AVX-NEXT: pushq %rbx
; AVX-NEXT: movswq %di, %r10
; AVX-NEXT: movswq %si, %r11
; AVX-NEXT: movswq %dx, %r14
; AVX-NEXT: movswq %cx, %rcx
; AVX-NEXT: movswq %r8w, %rdi
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movswq %r9w, %rax
; AVX-NEXT: movswq {{[0-9]+}}(%rsp), %rsi
; AVX-NEXT: movswq {{[0-9]+}}(%rsp), %rdx
; AVX-NEXT: movzwl -24(%rsp,%rsi,2), %esi
; AVX-NEXT: movzwl -24(%rsp,%rdx,2), %edx
; AVX-NEXT: movzwl -24(%rsp,%r10,2), %ebx
; AVX-NEXT: vmovd %ebx, %xmm0
; AVX-NEXT: vpinsrw $1, -24(%rsp,%r11,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $2, -24(%rsp,%r14,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $4, -24(%rsp,%rdi,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $5, -24(%rsp,%rax,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $6, %esi, %xmm0, %xmm0
; AVX-NEXT: vpinsrw $7, %edx, %xmm0, %xmm0
; AVX-NEXT: popq %rbx
; AVX-NEXT: popq %r14
; AVX-NEXT: retq
%x0 = extractelement <8 x i16> %x, i16 %i0
%x1 = extractelement <8 x i16> %x, i16 %i1
%x2 = extractelement <8 x i16> %x, i16 %i2
%x3 = extractelement <8 x i16> %x, i16 %i3
%x4 = extractelement <8 x i16> %x, i16 %i4
%x5 = extractelement <8 x i16> %x, i16 %i5
%x6 = extractelement <8 x i16> %x, i16 %i6
%x7 = extractelement <8 x i16> %x, i16 %i7
%r0 = insertelement <8 x i16> undef, i16 %x0, i32 0
%r1 = insertelement <8 x i16> %r0, i16 %x1, i32 1
%r2 = insertelement <8 x i16> %r1, i16 %x2, i32 2
%r3 = insertelement <8 x i16> %r2, i16 %x3, i32 3
%r4 = insertelement <8 x i16> %r3, i16 %x4, i32 4
%r5 = insertelement <8 x i16> %r4, i16 %x5, i32 5
%r6 = insertelement <8 x i16> %r5, i16 %x6, i32 6
%r7 = insertelement <8 x i16> %r6, i16 %x7, i32 7
ret <8 x i16> %r7
}

define <16 x i8> @var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8 %i0, i8 %i1, i8 %i2, i8 %i3, i8 %i4, i8 %i5, i8 %i6, i8 %i7, i8 %i8, i8 %i9, i8 %i10, i8 %i11, i8 %i12, i8 %i13, i8 %i14, i8 %i15) nounwind {
; SSE2-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE2: # BB#0:
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %r10
; SSE2-NEXT: leaq -{{[0-9]+}}(%rsp), %r11
; SSE2-NEXT: movzbl (%r10,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm15
; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm8
; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm9
; SSE2-NEXT: movsbq %dl, %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm3
; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm10
; SSE2-NEXT: movsbq %dil, %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm11
; SSE2-NEXT: movsbq %r8b, %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm7
; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm2
; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm12
; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm13
; SSE2-NEXT: movsbq %cl, %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm6
; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm14
; SSE2-NEXT: movsbq %sil, %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm5
; SSE2-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm4
; SSE2-NEXT: movsbq %r9b, %rax
; SSE2-NEXT: movzbl (%rax,%r11), %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3],xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSSE3: # BB#0:
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %r10
; SSSE3-NEXT: leaq -{{[0-9]+}}(%rsp), %r11
; SSSE3-NEXT: movzbl (%r10,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm15
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm8
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm9
; SSSE3-NEXT: movsbq %dl, %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm3
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm10
; SSSE3-NEXT: movsbq %dil, %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm11
; SSSE3-NEXT: movsbq %r8b, %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm7
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm2
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm12
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm13
; SSSE3-NEXT: movsbq %cl, %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm6
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm14
; SSSE3-NEXT: movsbq %sil, %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm5
; SSSE3-NEXT: movsbq {{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm4
; SSSE3-NEXT: movsbq %r9b, %rax
; SSSE3-NEXT: movzbl (%rax,%r11), %eax
; SSSE3-NEXT: movd %eax, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3],xmm3[4],xmm15[4],xmm3[5],xmm15[5],xmm3[6],xmm15[6],xmm3[7],xmm15[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3],xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3],xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3],xmm5[4],xmm6[4],xmm5[5],xmm6[5],xmm5[6],xmm6[6],xmm5[7],xmm6[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE41: # BB#0:
; SSE41-NEXT: pushq %rbp
; SSE41-NEXT: pushq %r15
; SSE41-NEXT: pushq %r14
; SSE41-NEXT: pushq %r13
; SSE41-NEXT: pushq %r12
; SSE41-NEXT: pushq %rbx
; SSE41-NEXT: movsbq %dil, %r15
; SSE41-NEXT: movsbq %sil, %r14
; SSE41-NEXT: movsbq %dl, %r11
; SSE41-NEXT: movsbq %cl, %r10
; SSE41-NEXT: movsbq %r8b, %r8
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: movsbq %r9b, %r9
; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r12
; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r13
; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rbp
; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rbx
; SSE41-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; SSE41-NEXT: movzbl (%r15,%rax), %ecx
; SSE41-NEXT: movd %ecx, %xmm0
; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r15
; SSE41-NEXT: pinsrb $1, (%r14,%rax), %xmm0
; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r14
; SSE41-NEXT: pinsrb $2, (%r11,%rax), %xmm0
; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r11
; SSE41-NEXT: pinsrb $3, (%r10,%rax), %xmm0
; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %r10
; SSE41-NEXT: pinsrb $4, (%r8,%rax), %xmm0
; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx
; SSE41-NEXT: pinsrb $5, (%r9,%rax), %xmm0
; SSE41-NEXT: movsbq {{[0-9]+}}(%rsp), %rdx
; SSE41-NEXT: movzbl (%r12,%rax), %esi
; SSE41-NEXT: movzbl (%r13,%rax), %edi
; SSE41-NEXT: movzbl (%rbp,%rax), %ebp
; SSE41-NEXT: movzbl (%rbx,%rax), %ebx
; SSE41-NEXT: movzbl (%r15,%rax), %r8d
; SSE41-NEXT: movzbl (%r14,%rax), %r9d
; SSE41-NEXT: movzbl (%r11,%rax), %r11d
; SSE41-NEXT: movzbl (%r10,%rax), %r10d
; SSE41-NEXT: movzbl (%rcx,%rax), %ecx
; SSE41-NEXT: movzbl (%rdx,%rax), %eax
; SSE41-NEXT: pinsrb $6, %esi, %xmm0
; SSE41-NEXT: pinsrb $7, %edi, %xmm0
; SSE41-NEXT: pinsrb $8, %ebp, %xmm0
; SSE41-NEXT: pinsrb $9, %ebx, %xmm0
; SSE41-NEXT: pinsrb $10, %r8d, %xmm0
; SSE41-NEXT: pinsrb $11, %r9d, %xmm0
; SSE41-NEXT: pinsrb $12, %r11d, %xmm0
; SSE41-NEXT: pinsrb $13, %r10d, %xmm0
; SSE41-NEXT: pinsrb $14, %ecx, %xmm0
; SSE41-NEXT: pinsrb $15, %eax, %xmm0
; SSE41-NEXT: popq %rbx
; SSE41-NEXT: popq %r12
; SSE41-NEXT: popq %r13
; SSE41-NEXT: popq %r14
; SSE41-NEXT: popq %r15
; SSE41-NEXT: popq %rbp
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; AVX: # BB#0:
; AVX-NEXT: pushq %rbp
; AVX-NEXT: pushq %r15
; AVX-NEXT: pushq %r14
; AVX-NEXT: pushq %r13
; AVX-NEXT: pushq %r12
; AVX-NEXT: pushq %rbx
; AVX-NEXT: movsbq %dil, %r10
; AVX-NEXT: movsbq %sil, %r11
; AVX-NEXT: movsbq %dl, %r14
; AVX-NEXT: movsbq %cl, %r15
; AVX-NEXT: movsbq %r8b, %r8
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movsbq %r9b, %r9
; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r12
; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r13
; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rbp
; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rcx
; AVX-NEXT: leaq -{{[0-9]+}}(%rsp), %rdi
; AVX-NEXT: movzbl (%r10,%rdi), %eax
; AVX-NEXT: vmovd %eax, %xmm0
; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r10
; AVX-NEXT: vpinsrb $1, (%r11,%rdi), %xmm0, %xmm0
; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r11
; AVX-NEXT: vpinsrb $2, (%r14,%rdi), %xmm0, %xmm0
; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r14
; AVX-NEXT: vpinsrb $3, (%r15,%rdi), %xmm0, %xmm0
; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r15
; AVX-NEXT: vpinsrb $4, (%r8,%rdi), %xmm0, %xmm0
; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %r8
; AVX-NEXT: vpinsrb $5, (%r9,%rdi), %xmm0, %xmm0
; AVX-NEXT: movsbq {{[0-9]+}}(%rsp), %rsi
; AVX-NEXT: movzbl (%r12,%rdi), %edx
; AVX-NEXT: movzbl (%r13,%rdi), %ebx
; AVX-NEXT: movzbl (%rbp,%rdi), %ebp
; AVX-NEXT: movzbl (%rcx,%rdi), %ecx
; AVX-NEXT: movzbl (%r10,%rdi), %eax
; AVX-NEXT: movzbl (%r11,%rdi), %r9d
; AVX-NEXT: movzbl (%r14,%rdi), %r10d
; AVX-NEXT: movzbl (%r15,%rdi), %r11d
; AVX-NEXT: movzbl (%r8,%rdi), %r8d
; AVX-NEXT: movzbl (%rsi,%rdi), %esi
; AVX-NEXT: vpinsrb $6, %edx, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $7, %ebx, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $8, %ebp, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $11, %r9d, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $12, %r10d, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $13, %r11d, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $14, %r8d, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $15, %esi, %xmm0, %xmm0
; AVX-NEXT: popq %rbx
; AVX-NEXT: popq %r12
; AVX-NEXT: popq %r13
; AVX-NEXT: popq %r14
; AVX-NEXT: popq %r15
; AVX-NEXT: popq %rbp
; AVX-NEXT: retq
%x0 = extractelement <16 x i8> %x, i8 %i0
%x1 = extractelement <16 x i8> %x, i8 %i1
%x2 = extractelement <16 x i8> %x, i8 %i2
%x3 = extractelement <16 x i8> %x, i8 %i3
%x4 = extractelement <16 x i8> %x, i8 %i4
%x5 = extractelement <16 x i8> %x, i8 %i5
%x6 = extractelement <16 x i8> %x, i8 %i6
%x7 = extractelement <16 x i8> %x, i8 %i7
%x8 = extractelement <16 x i8> %x, i8 %i8
%x9 = extractelement <16 x i8> %x, i8 %i9
%x10 = extractelement <16 x i8> %x, i8 %i10
%x11 = extractelement <16 x i8> %x, i8 %i11
%x12 = extractelement <16 x i8> %x, i8 %i12
%x13 = extractelement <16 x i8> %x, i8 %i13
%x14 = extractelement <16 x i8> %x, i8 %i14
%x15 = extractelement <16 x i8> %x, i8 %i15
%r0 = insertelement <16 x i8> undef, i8 %x0 , i32 0
%r1 = insertelement <16 x i8> %r0 , i8 %x1 , i32 1
%r2 = insertelement <16 x i8> %r1 , i8 %x2 , i32 2
%r3 = insertelement <16 x i8> %r2 , i8 %x3 , i32 3
%r4 = insertelement <16 x i8> %r3 , i8 %x4 , i32 4
%r5 = insertelement <16 x i8> %r4 , i8 %x5 , i32 5
%r6 = insertelement <16 x i8> %r5 , i8 %x6 , i32 6
%r7 = insertelement <16 x i8> %r6 , i8 %x7 , i32 7
%r8 = insertelement <16 x i8> %r7 , i8 %x8 , i32 8
%r9 = insertelement <16 x i8> %r8 , i8 %x9 , i32 9
%r10 = insertelement <16 x i8> %r9 , i8 %x10, i32 10
%r11 = insertelement <16 x i8> %r10, i8 %x11, i32 11
%r12 = insertelement <16 x i8> %r11, i8 %x12, i32 12
%r13 = insertelement <16 x i8> %r12, i8 %x13, i32 13
%r14 = insertelement <16 x i8> %r13, i8 %x14, i32 14
%r15 = insertelement <16 x i8> %r14, i8 %x15, i32 15
ret <16 x i8> %r15
}

;
; Unary shuffle indices from memory
;

define <4 x i32> @mem_shuffle_v4i32_v4i32_xxxx_i32(<4 x i32> %x, i32* %i) nounwind {
; SSE2-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
; SSE2: # BB#0:
; SSE2-NEXT: movslq (%rdi), %rax
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movslq 4(%rdi), %rcx
; SSE2-NEXT: movslq 8(%rdi), %rdx
; SSE2-NEXT: movslq 12(%rdi), %rsi
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
; SSSE3: # BB#0:
; SSSE3-NEXT: movslq (%rdi), %rax
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movslq 4(%rdi), %rcx
; SSSE3-NEXT: movslq 8(%rdi), %rdx
; SSSE3-NEXT: movslq 12(%rdi), %rsi
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
; SSE41: # BB#0:
; SSE41-NEXT: movslq (%rdi), %rax
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: movslq 4(%rdi), %rcx
; SSE41-NEXT: movslq 8(%rdi), %rdx
; SSE41-NEXT: movslq 12(%rdi), %rsi
; SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE41-NEXT: pinsrd $1, -24(%rsp,%rcx,4), %xmm0
; SSE41-NEXT: pinsrd $2, -24(%rsp,%rdx,4), %xmm0
; SSE41-NEXT: pinsrd $3, -24(%rsp,%rsi,4), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: mem_shuffle_v4i32_v4i32_xxxx_i32:
; AVX: # BB#0:
; AVX-NEXT: movslq (%rdi), %rax
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movslq 4(%rdi), %rcx
; AVX-NEXT: movslq 8(%rdi), %rdx
; AVX-NEXT: movslq 12(%rdi), %rsi
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vpinsrd $1, -24(%rsp,%rcx,4), %xmm0, %xmm0
; AVX-NEXT: vpinsrd $2, -24(%rsp,%rdx,4), %xmm0, %xmm0
; AVX-NEXT: vpinsrd $3, -24(%rsp,%rsi,4), %xmm0, %xmm0
; AVX-NEXT: retq
%p0 = getelementptr inbounds i32, i32* %i, i64 0
%p1 = getelementptr inbounds i32, i32* %i, i64 1
%p2 = getelementptr inbounds i32, i32* %i, i64 2
%p3 = getelementptr inbounds i32, i32* %i, i64 3
%i0 = load i32, i32* %p0, align 4
%i1 = load i32, i32* %p1, align 4
%i2 = load i32, i32* %p2, align 4
%i3 = load i32, i32* %p3, align 4
%x0 = extractelement <4 x i32> %x, i32 %i0
%x1 = extractelement <4 x i32> %x, i32 %i1
%x2 = extractelement <4 x i32> %x, i32 %i2
%x3 = extractelement <4 x i32> %x, i32 %i3
%r0 = insertelement <4 x i32> undef, i32 %x0, i32 0
%r1 = insertelement <4 x i32> %r0, i32 %x1, i32 1
%r2 = insertelement <4 x i32> %r1, i32 %x2, i32 2
%r3 = insertelement <4 x i32> %r2, i32 %x3, i32 3
ret <4 x i32> %r3
}

define <16 x i8> @mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8(<16 x i8> %x, i8* %i) nounwind {
; SSE2-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE2: # BB#0:
; SSE2-NEXT: movsbq (%rdi), %rcx
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm0
; SSE2-NEXT: movsbq 8(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm8
; SSE2-NEXT: movsbq 12(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm9
; SSE2-NEXT: movsbq 4(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm3
; SSE2-NEXT: movsbq 14(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm10
; SSE2-NEXT: movsbq 6(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm5
; SSE2-NEXT: movsbq 10(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm11
; SSE2-NEXT: movsbq 2(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm7
; SSE2-NEXT: movsbq 15(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm12
; SSE2-NEXT: movsbq 7(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm2
; SSE2-NEXT: movsbq 11(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm13
; SSE2-NEXT: movsbq 3(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm6
; SSE2-NEXT: movsbq 13(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm14
; SSE2-NEXT: movsbq 5(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm4
; SSE2-NEXT: movsbq 9(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %ecx
; SSE2-NEXT: movd %ecx, %xmm15
; SSE2-NEXT: movsbq 1(%rdi), %rcx
; SSE2-NEXT: movzbl (%rcx,%rax), %eax
; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm10[0],xmm5[1],xmm10[1],xmm5[2],xmm10[2],xmm5[3],xmm10[3],xmm5[4],xmm10[4],xmm5[5],xmm10[5],xmm5[6],xmm10[6],xmm5[7],xmm10[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3],xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3],xmm1[4],xmm15[4],xmm1[5],xmm15[5],xmm1[6],xmm15[6],xmm1[7],xmm15[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSSE3: # BB#0:
; SSSE3-NEXT: movsbq (%rdi), %rcx
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm0
; SSSE3-NEXT: movsbq 8(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm8
; SSSE3-NEXT: movsbq 12(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm9
; SSSE3-NEXT: movsbq 4(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm3
; SSSE3-NEXT: movsbq 14(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm10
; SSSE3-NEXT: movsbq 6(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm5
; SSSE3-NEXT: movsbq 10(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm11
; SSSE3-NEXT: movsbq 2(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm7
; SSSE3-NEXT: movsbq 15(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm12
; SSSE3-NEXT: movsbq 7(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm2
; SSSE3-NEXT: movsbq 11(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm13
; SSSE3-NEXT: movsbq 3(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm6
; SSSE3-NEXT: movsbq 13(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm14
; SSSE3-NEXT: movsbq 5(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm4
; SSSE3-NEXT: movsbq 9(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm15
; SSSE3-NEXT: movsbq 1(%rdi), %rcx
; SSSE3-NEXT: movzbl (%rcx,%rax), %eax
; SSSE3-NEXT: movd %eax, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm10[0],xmm5[1],xmm10[1],xmm5[2],xmm10[2],xmm5[3],xmm10[3],xmm5[4],xmm10[4],xmm5[5],xmm10[5],xmm5[6],xmm10[6],xmm5[7],xmm10[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm11[0],xmm7[1],xmm11[1],xmm7[2],xmm11[2],xmm7[3],xmm11[3],xmm7[4],xmm11[4],xmm7[5],xmm11[5],xmm7[6],xmm11[6],xmm7[7],xmm11[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm5[0],xmm7[1],xmm5[1],xmm7[2],xmm5[2],xmm7[3],xmm5[3],xmm7[4],xmm5[4],xmm7[5],xmm5[5],xmm7[6],xmm5[6],xmm7[7],xmm5[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3],xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3],xmm2[4],xmm12[4],xmm2[5],xmm12[5],xmm2[6],xmm12[6],xmm2[7],xmm12[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm13[0],xmm6[1],xmm13[1],xmm6[2],xmm13[2],xmm6[3],xmm13[3],xmm6[4],xmm13[4],xmm6[5],xmm13[5],xmm6[6],xmm13[6],xmm6[7],xmm13[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm2[0],xmm6[1],xmm2[1],xmm6[2],xmm2[2],xmm6[3],xmm2[3],xmm6[4],xmm2[4],xmm6[5],xmm2[5],xmm6[6],xmm2[6],xmm6[7],xmm2[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3],xmm1[4],xmm15[4],xmm1[5],xmm15[5],xmm1[6],xmm15[6],xmm1[7],xmm15[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; SSE41: # BB#0:
; SSE41-NEXT: pushq %rbp
; SSE41-NEXT: pushq %r15
; SSE41-NEXT: pushq %r14
; SSE41-NEXT: pushq %r13
; SSE41-NEXT: pushq %r12
; SSE41-NEXT: pushq %rbx
; SSE41-NEXT: movsbq (%rdi), %rax
; SSE41-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: movsbq 1(%rdi), %r15
; SSE41-NEXT: movsbq 2(%rdi), %r8
; SSE41-NEXT: movsbq 3(%rdi), %r9
; SSE41-NEXT: movsbq 4(%rdi), %r10
; SSE41-NEXT: movsbq 5(%rdi), %r11
; SSE41-NEXT: movsbq 6(%rdi), %r14
; SSE41-NEXT: movsbq 7(%rdi), %r12
; SSE41-NEXT: movsbq 8(%rdi), %r13
; SSE41-NEXT: movsbq 9(%rdi), %rdx
; SSE41-NEXT: movsbq 10(%rdi), %rcx
; SSE41-NEXT: movsbq 11(%rdi), %rsi
; SSE41-NEXT: movsbq 12(%rdi), %rbx
; SSE41-NEXT: leaq -{{[0-9]+}}(%rsp), %rbp
; SSE41-NEXT: movzbl (%rax,%rbp), %eax
; SSE41-NEXT: movd %eax, %xmm0
; SSE41-NEXT: movsbq 13(%rdi), %rax
; SSE41-NEXT: pinsrb $1, (%r15,%rbp), %xmm0
; SSE41-NEXT: movsbq 14(%rdi), %r15
; SSE41-NEXT: movsbq 15(%rdi), %rdi
; SSE41-NEXT: movzbl (%rdi,%rbp), %edi
; SSE41-NEXT: movzbl (%r15,%rbp), %r15d
; SSE41-NEXT: movzbl (%rax,%rbp), %eax
; SSE41-NEXT: movzbl (%rbx,%rbp), %ebx
; SSE41-NEXT: movzbl (%rsi,%rbp), %esi
; SSE41-NEXT: movzbl (%rcx,%rbp), %ecx
; SSE41-NEXT: movzbl (%rdx,%rbp), %edx
; SSE41-NEXT: movzbl (%r13,%rbp), %r13d
; SSE41-NEXT: movzbl (%r12,%rbp), %r12d
; SSE41-NEXT: movzbl (%r14,%rbp), %r14d
; SSE41-NEXT: movzbl (%r11,%rbp), %r11d
; SSE41-NEXT: movzbl (%r10,%rbp), %r10d
; SSE41-NEXT: movzbl (%r9,%rbp), %r9d
; SSE41-NEXT: movzbl (%r8,%rbp), %ebp
; SSE41-NEXT: pinsrb $2, %ebp, %xmm0
; SSE41-NEXT: pinsrb $3, %r9d, %xmm0
; SSE41-NEXT: pinsrb $4, %r10d, %xmm0
; SSE41-NEXT: pinsrb $5, %r11d, %xmm0
; SSE41-NEXT: pinsrb $6, %r14d, %xmm0
; SSE41-NEXT: pinsrb $7, %r12d, %xmm0
; SSE41-NEXT: pinsrb $8, %r13d, %xmm0
; SSE41-NEXT: pinsrb $9, %edx, %xmm0
; SSE41-NEXT: pinsrb $10, %ecx, %xmm0
; SSE41-NEXT: pinsrb $11, %esi, %xmm0
; SSE41-NEXT: pinsrb $12, %ebx, %xmm0
; SSE41-NEXT: pinsrb $13, %eax, %xmm0
; SSE41-NEXT: pinsrb $14, %r15d, %xmm0
; SSE41-NEXT: pinsrb $15, %edi, %xmm0
; SSE41-NEXT: popq %rbx
; SSE41-NEXT: popq %r12
; SSE41-NEXT: popq %r13
; SSE41-NEXT: popq %r14
; SSE41-NEXT: popq %r15
; SSE41-NEXT: popq %rbp
; SSE41-NEXT: retq
;
; AVX-LABEL: mem_shuffle_v16i8_v16i8_xxxxxxxxxxxxxxxx_i8:
; AVX: # BB#0:
; AVX-NEXT: pushq %rbp
; AVX-NEXT: pushq %r15
; AVX-NEXT: pushq %r14
; AVX-NEXT: pushq %r13
; AVX-NEXT: pushq %r12
; AVX-NEXT: pushq %rbx
; AVX-NEXT: movsbq (%rdi), %rsi
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movsbq 1(%rdi), %r15
; AVX-NEXT: movsbq 2(%rdi), %r8
; AVX-NEXT: movsbq 3(%rdi), %r9
; AVX-NEXT: movsbq 4(%rdi), %r10
; AVX-NEXT: movsbq 5(%rdi), %r11
; AVX-NEXT: movsbq 6(%rdi), %r14
; AVX-NEXT: movsbq 7(%rdi), %r12
; AVX-NEXT: movsbq 8(%rdi), %r13
; AVX-NEXT: movsbq 9(%rdi), %rdx
; AVX-NEXT: movsbq 10(%rdi), %rax
; AVX-NEXT: movsbq 11(%rdi), %rcx
; AVX-NEXT: movsbq 12(%rdi), %rbx
; AVX-NEXT: leaq -{{[0-9]+}}(%rsp), %rbp
; AVX-NEXT: movzbl (%rsi,%rbp), %esi
; AVX-NEXT: vmovd %esi, %xmm0
; AVX-NEXT: movsbq 13(%rdi), %rsi
; AVX-NEXT: vpinsrb $1, (%r15,%rbp), %xmm0, %xmm0
; AVX-NEXT: movsbq 14(%rdi), %r15
; AVX-NEXT: movsbq 15(%rdi), %rdi
; AVX-NEXT: movzbl (%rdi,%rbp), %edi
; AVX-NEXT: movzbl (%r15,%rbp), %r15d
; AVX-NEXT: movzbl (%rsi,%rbp), %esi
; AVX-NEXT: movzbl (%rbx,%rbp), %ebx
; AVX-NEXT: movzbl (%rcx,%rbp), %ecx
; AVX-NEXT: movzbl (%rax,%rbp), %eax
; AVX-NEXT: movzbl (%rdx,%rbp), %edx
; AVX-NEXT: movzbl (%r13,%rbp), %r13d
; AVX-NEXT: movzbl (%r12,%rbp), %r12d
; AVX-NEXT: movzbl (%r14,%rbp), %r14d
; AVX-NEXT: movzbl (%r11,%rbp), %r11d
; AVX-NEXT: movzbl (%r10,%rbp), %r10d
; AVX-NEXT: movzbl (%r9,%rbp), %r9d
; AVX-NEXT: movzbl (%r8,%rbp), %ebp
; AVX-NEXT: vpinsrb $2, %ebp, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $3, %r9d, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $4, %r10d, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $5, %r11d, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $6, %r14d, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $7, %r12d, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $8, %r13d, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $9, %edx, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $10, %eax, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $12, %ebx, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $13, %esi, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $14, %r15d, %xmm0, %xmm0
; AVX-NEXT: vpinsrb $15, %edi, %xmm0, %xmm0
; AVX-NEXT: popq %rbx
; AVX-NEXT: popq %r12
; AVX-NEXT: popq %r13
; AVX-NEXT: popq %r14
; AVX-NEXT: popq %r15
; AVX-NEXT: popq %rbp
; AVX-NEXT: retq
%p0 = getelementptr inbounds i8, i8* %i, i64 0
%p1 = getelementptr inbounds i8, i8* %i, i64 1
%p2 = getelementptr inbounds i8, i8* %i, i64 2
%p3 = getelementptr inbounds i8, i8* %i, i64 3
%p4 = getelementptr inbounds i8, i8* %i, i64 4
%p5 = getelementptr inbounds i8, i8* %i, i64 5
%p6 = getelementptr inbounds i8, i8* %i, i64 6
%p7 = getelementptr inbounds i8, i8* %i, i64 7
%p8 = getelementptr inbounds i8, i8* %i, i64 8
%p9 = getelementptr inbounds i8, i8* %i, i64 9
%p10 = getelementptr inbounds i8, i8* %i, i64 10
%p11 = getelementptr inbounds i8, i8* %i, i64 11
%p12 = getelementptr inbounds i8, i8* %i, i64 12
%p13 = getelementptr inbounds i8, i8* %i, i64 13
%p14 = getelementptr inbounds i8, i8* %i, i64 14
%p15 = getelementptr inbounds i8, i8* %i, i64 15
%i0 = load i8, i8* %p0 , align 4
%i1 = load i8, i8* %p1 , align 4
%i2 = load i8, i8* %p2 , align 4
%i3 = load i8, i8* %p3 , align 4
%i4 = load i8, i8* %p4 , align 4
%i5 = load i8, i8* %p5 , align 4
%i6 = load i8, i8* %p6 , align 4
%i7 = load i8, i8* %p7 , align 4
%i8 = load i8, i8* %p8 , align 4
%i9 = load i8, i8* %p9 , align 4
%i10 = load i8, i8* %p10, align 4
%i11 = load i8, i8* %p11, align 4
%i12 = load i8, i8* %p12, align 4
%i13 = load i8, i8* %p13, align 4
%i14 = load i8, i8* %p14, align 4
%i15 = load i8, i8* %p15, align 4
%x0 = extractelement <16 x i8> %x, i8 %i0
%x1 = extractelement <16 x i8> %x, i8 %i1
%x2 = extractelement <16 x i8> %x, i8 %i2
%x3 = extractelement <16 x i8> %x, i8 %i3
%x4 = extractelement <16 x i8> %x, i8 %i4
%x5 = extractelement <16 x i8> %x, i8 %i5
%x6 = extractelement <16 x i8> %x, i8 %i6
%x7 = extractelement <16 x i8> %x, i8 %i7
%x8 = extractelement <16 x i8> %x, i8 %i8
%x9 = extractelement <16 x i8> %x, i8 %i9
%x10 = extractelement <16 x i8> %x, i8 %i10
%x11 = extractelement <16 x i8> %x, i8 %i11
%x12 = extractelement <16 x i8> %x, i8 %i12
%x13 = extractelement <16 x i8> %x, i8 %i13
%x14 = extractelement <16 x i8> %x, i8 %i14
%x15 = extractelement <16 x i8> %x, i8 %i15
%r0 = insertelement <16 x i8> undef, i8 %x0 , i32 0
%r1 = insertelement <16 x i8> %r0 , i8 %x1 , i32 1
%r2 = insertelement <16 x i8> %r1 , i8 %x2 , i32 2
%r3 = insertelement <16 x i8> %r2 , i8 %x3 , i32 3
%r4 = insertelement <16 x i8> %r3 , i8 %x4 , i32 4
%r5 = insertelement <16 x i8> %r4 , i8 %x5 , i32 5
%r6 = insertelement <16 x i8> %r5 , i8 %x6 , i32 6
%r7 = insertelement <16 x i8> %r6 , i8 %x7 , i32 7
%r8 = insertelement <16 x i8> %r7 , i8 %x8 , i32 8
%r9 = insertelement <16 x i8> %r8 , i8 %x9 , i32 9
%r10 = insertelement <16 x i8> %r9 , i8 %x10, i32 10
%r11 = insertelement <16 x i8> %r10, i8 %x11, i32 11
%r12 = insertelement <16 x i8> %r11, i8 %x12, i32 12
%r13 = insertelement <16 x i8> %r12, i8 %x13, i32 13
%r14 = insertelement <16 x i8> %r13, i8 %x14, i32 14
%r15 = insertelement <16 x i8> %r14, i8 %x15, i32 15
ret <16 x i8> %r15
}

;
; Binary shuffle indices from registers
;

define <4 x float> @var_shuffle_v4f32_v4f32_x0yx_i32(<4 x float> %x, <4 x float> %y, i32 %i0, i32 %i1, i32 %i2, i32 %i3) nounwind {
; SSE-LABEL: var_shuffle_v4f32_v4f32_x0yx_i32:
; SSE: # BB#0:
; SSE-NEXT: movslq %edi, %rax
; SSE-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movslq %edx, %rdx
; SSE-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT: movslq %ecx, %rcx
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE-NEXT: movss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE-NEXT: unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: retq
;
; AVX-LABEL: var_shuffle_v4f32_v4f32_x0yx_i32:
; AVX: # BB#0:
; AVX-NEXT: movslq %edi, %rax
; AVX-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movslq %edx, %rdx
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: movslq %ecx, %rcx
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; AVX-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
%x0 = extractelement <4 x float> %x, i32 %i0
%x1 = extractelement <4 x float> %x, i32 %i1
%y2 = extractelement <4 x float> %y, i32 %i2
%x3 = extractelement <4 x float> %x, i32 %i3
%r0 = insertelement <4 x float> undef, float %x0, i32 0
%r1 = insertelement <4 x float> %r0, float 0.0, i32 1
%r2 = insertelement <4 x float> %r1, float %y2, i32 2
%r3 = insertelement <4 x float> %r2, float %x3, i32 3
ret <4 x float> %r3
}

define <8 x i16> @var_shuffle_v8i16_v8i16_xyxyxy00_i16(<8 x i16> %x, <8 x i16> %y, i16 %i0, i16 %i1, i16 %i2, i16 %i3, i16 %i4, i16 %i5, i16 %i6, i16 %i7) nounwind {
; SSE2-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; SSE2: # BB#0:
; SSE2-NEXT: movswq %di, %r10
; SSE2-NEXT: movswq %si, %rsi
; SSE2-NEXT: movswq %dx, %r11
; SSE2-NEXT: movswq %cx, %rcx
; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movswq %r8w, %rdi
; SSE2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE2-NEXT: movswq %r9w, %rax
; SSE2-NEXT: movzwl -24(%rsp,%rsi,2), %esi
; SSE2-NEXT: xorl %edx, %edx
; SSE2-NEXT: movd %edx, %xmm0
; SSE2-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
; SSE2-NEXT: movd %ecx, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: movd %esi, %xmm2
; SSE2-NEXT: movzwl -24(%rsp,%rax,2), %eax
; SSE2-NEXT: movd %eax, %xmm3
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE2-NEXT: movzwl -40(%rsp,%r10,2), %eax
; SSE2-NEXT: movzwl -40(%rsp,%r11,2), %ecx
; SSE2-NEXT: movd %ecx, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: movzwl -40(%rsp,%rdi,2), %eax
; SSE2-NEXT: movd %eax, %xmm3
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; SSSE3: # BB#0:
; SSSE3-NEXT: movswq %di, %r10
; SSSE3-NEXT: movswq %si, %rsi
; SSSE3-NEXT: movswq %dx, %r11
; SSSE3-NEXT: movswq %cx, %rcx
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movswq %r8w, %rdi
; SSSE3-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movswq %r9w, %rax
; SSSE3-NEXT: movzwl -24(%rsp,%rsi,2), %esi
; SSSE3-NEXT: xorl %edx, %edx
; SSSE3-NEXT: movd %edx, %xmm0
; SSSE3-NEXT: movzwl -24(%rsp,%rcx,2), %ecx
; SSSE3-NEXT: movd %ecx, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSSE3-NEXT: movd %esi, %xmm2
; SSSE3-NEXT: movzwl -24(%rsp,%rax,2), %eax
; SSSE3-NEXT: movd %eax, %xmm3
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSSE3-NEXT: movzwl -40(%rsp,%r10,2), %eax
; SSSE3-NEXT: movzwl -40(%rsp,%r11,2), %ecx
; SSSE3-NEXT: movd %ecx, %xmm1
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: movzwl -40(%rsp,%rdi,2), %eax
; SSSE3-NEXT: movd %eax, %xmm3
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; SSE41: # BB#0:
; SSE41-NEXT: movswq %di, %rax
; SSE41-NEXT: movswq %si, %rsi
; SSE41-NEXT: movswq %dx, %rdx
; SSE41-NEXT: movswq %cx, %r10
; SSE41-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: movswq %r8w, %rdi
; SSE41-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSE41-NEXT: movswq %r9w, %rcx
; SSE41-NEXT: movzwl -40(%rsp,%rax,2), %eax
; SSE41-NEXT: movd %eax, %xmm1
; SSE41-NEXT: pinsrw $1, -24(%rsp,%rsi,2), %xmm1
; SSE41-NEXT: pinsrw $2, -40(%rsp,%rdx,2), %xmm1
; SSE41-NEXT: pinsrw $3, -24(%rsp,%r10,2), %xmm1
; SSE41-NEXT: pinsrw $4, -40(%rsp,%rdi,2), %xmm1
; SSE41-NEXT: pinsrw $5, -24(%rsp,%rcx,2), %xmm1
; SSE41-NEXT: pxor %xmm0, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
; SSE41-NEXT: retq
;
; AVX-LABEL: var_shuffle_v8i16_v8i16_xyxyxy00_i16:
; AVX: # BB#0:
; AVX-NEXT: movswq %di, %r10
; AVX-NEXT: movswq %si, %r11
; AVX-NEXT: movswq %dx, %rdx
; AVX-NEXT: movswq %cx, %rcx
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
1191 ; AVX-NEXT: movswq %r8w, %rdi
1192 ; AVX-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp)
1193 ; AVX-NEXT: movswq %r9w, %rax
1194 ; AVX-NEXT: movzwl -40(%rsp,%r10,2), %esi
1195 ; AVX-NEXT: vmovd %esi, %xmm0
1196 ; AVX-NEXT: vpinsrw $1, -24(%rsp,%r11,2), %xmm0, %xmm0
1197 ; AVX-NEXT: vpinsrw $2, -40(%rsp,%rdx,2), %xmm0, %xmm0
1198 ; AVX-NEXT: vpinsrw $3, -24(%rsp,%rcx,2), %xmm0, %xmm0
1199 ; AVX-NEXT: vpinsrw $4, -40(%rsp,%rdi,2), %xmm0, %xmm0
1200 ; AVX-NEXT: vpinsrw $5, -24(%rsp,%rax,2), %xmm0, %xmm0
1201 ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
1202 ; AVX-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
1203 ; AVX-NEXT: retq
1204 %x0 = extractelement <8 x i16> %x, i16 %i0
1205 %y1 = extractelement <8 x i16> %y, i16 %i1
1206 %x2 = extractelement <8 x i16> %x, i16 %i2
1207 %y3 = extractelement <8 x i16> %y, i16 %i3
1208 %x4 = extractelement <8 x i16> %x, i16 %i4
1209 %y5 = extractelement <8 x i16> %y, i16 %i5
1210 %x6 = extractelement <8 x i16> %x, i16 %i6
1211 %x7 = extractelement <8 x i16> %x, i16 %i7
1212 %r0 = insertelement <8 x i16> undef, i16 %x0, i32 0
1213 %r1 = insertelement <8 x i16> %r0, i16 %y1, i32 1
1214 %r2 = insertelement <8 x i16> %r1, i16 %x2, i32 2
1215 %r3 = insertelement <8 x i16> %r2, i16 %y3, i32 3
1216 %r4 = insertelement <8 x i16> %r3, i16 %x4, i32 4
1217 %r5 = insertelement <8 x i16> %r4, i16 %y5, i32 5
1218 %r6 = insertelement <8 x i16> %r5, i16 0, i32 6
1219 %r7 = insertelement <8 x i16> %r6, i16 0, i32 7
1220 ret <8 x i16> %r7
1221 }

; (second changed file: 256-bit variable shuffle tests)
0 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
1 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
3
4 ;
5 ; Unary shuffle indices from registers
6 ;
7
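; Spilling a ymm source requires a 32-byte-aligned slot, hence the rbp-based
; frame and the 'andq $-32, %rsp' / 'subq $64, %rsp' sequence in the checks
; below.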
8 define <4 x double> @var_shuffle_v4f64_v4f64_xxxx_i64(<4 x double> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
9 ; ALL-LABEL: var_shuffle_v4f64_v4f64_xxxx_i64:
10 ; ALL: # BB#0:
11 ; ALL-NEXT: pushq %rbp
12 ; ALL-NEXT: movq %rsp, %rbp
13 ; ALL-NEXT: andq $-32, %rsp
14 ; ALL-NEXT: subq $64, %rsp
15 ; ALL-NEXT: vmovaps %ymm0, (%rsp)
16 ; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
17 ; ALL-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
18 ; ALL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
19 ; ALL-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
20 ; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
21 ; ALL-NEXT: movq %rbp, %rsp
22 ; ALL-NEXT: popq %rbp
23 ; ALL-NEXT: retq
24 %x0 = extractelement <4 x double> %x, i64 %i0
25 %x1 = extractelement <4 x double> %x, i64 %i1
26 %x2 = extractelement <4 x double> %x, i64 %i2
27 %x3 = extractelement <4 x double> %x, i64 %i3
28 %r0 = insertelement <4 x double> undef, double %x0, i32 0
29 %r1 = insertelement <4 x double> %r0, double %x1, i32 1
30 %r2 = insertelement <4 x double> %r1, double %x2, i32 2
31 %r3 = insertelement <4 x double> %r2, double %x3, i32 3
32 ret <4 x double> %r3
33 }
34
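; Lane 0 is undef and lane 3 is zero, so only %x1 and %x2 are loaded:
; vmovddup duplicates the lane-1 value into the undef lane 0, and vmovsd's
; implicit zero supplies lane 3.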
35 define <4 x double> @var_shuffle_v4f64_v4f64_uxx0_i64(<4 x double> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
36 ; ALL-LABEL: var_shuffle_v4f64_v4f64_uxx0_i64:
37 ; ALL: # BB#0:
38 ; ALL-NEXT: pushq %rbp
39 ; ALL-NEXT: movq %rsp, %rbp
40 ; ALL-NEXT: andq $-32, %rsp
41 ; ALL-NEXT: subq $64, %rsp
42 ; ALL-NEXT: vmovaps %ymm0, (%rsp)
43 ; ALL-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0]
44 ; ALL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
45 ; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
46 ; ALL-NEXT: movq %rbp, %rsp
47 ; ALL-NEXT: popq %rbp
48 ; ALL-NEXT: retq
49 %x0 = extractelement <4 x double> %x, i64 %i0
50 %x1 = extractelement <4 x double> %x, i64 %i1
51 %x2 = extractelement <4 x double> %x, i64 %i2
52 %x3 = extractelement <4 x double> %x, i64 %i3
53 %r0 = insertelement <4 x double> undef, double undef, i32 0
54 %r1 = insertelement <4 x double> %r0, double %x1, i32 1
55 %r2 = insertelement <4 x double> %r1, double %x2, i32 2
56 %r3 = insertelement <4 x double> %r2, double 0.0, i32 3
57 ret <4 x double> %r3
58 }
59
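; A 128-bit source can be spilled into the red zone below %rsp, so no aligned
; frame is needed here.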
60 define <4 x double> @var_shuffle_v4f64_v2f64_xxxx_i64(<2 x double> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
61 ; ALL-LABEL: var_shuffle_v4f64_v2f64_xxxx_i64:
62 ; ALL: # BB#0:
63 ; ALL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
64 ; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
65 ; ALL-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
66 ; ALL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
67 ; ALL-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
68 ; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
69 ; ALL-NEXT: retq
70 %x0 = extractelement <2 x double> %x, i64 %i0
71 %x1 = extractelement <2 x double> %x, i64 %i1
72 %x2 = extractelement <2 x double> %x, i64 %i2
73 %x3 = extractelement <2 x double> %x, i64 %i3
74 %r0 = insertelement <4 x double> undef, double %x0, i32 0
75 %r1 = insertelement <4 x double> %r0, double %x1, i32 1
76 %r2 = insertelement <4 x double> %r1, double %x2, i32 2
77 %r3 = insertelement <4 x double> %r2, double %x3, i32 3
78 ret <4 x double> %r3
79 }
80
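; The i64 elements are reloaded with vmovq and paired via vpunpcklqdq; the
; AVX1 and AVX2 outputs differ only in vinsertf128 vs vinserti128.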
81 define <4 x i64> @var_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
82 ; AVX1-LABEL: var_shuffle_v4i64_v4i64_xxxx_i64:
83 ; AVX1: # BB#0:
84 ; AVX1-NEXT: pushq %rbp
85 ; AVX1-NEXT: movq %rsp, %rbp
86 ; AVX1-NEXT: andq $-32, %rsp
87 ; AVX1-NEXT: subq $64, %rsp
88 ; AVX1-NEXT: vmovaps %ymm0, (%rsp)
89 ; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
90 ; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
91 ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
92 ; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
93 ; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
94 ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
95 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
96 ; AVX1-NEXT: movq %rbp, %rsp
97 ; AVX1-NEXT: popq %rbp
98 ; AVX1-NEXT: retq
99 ;
100 ; AVX2-LABEL: var_shuffle_v4i64_v4i64_xxxx_i64:
101 ; AVX2: # BB#0:
102 ; AVX2-NEXT: pushq %rbp
103 ; AVX2-NEXT: movq %rsp, %rbp
104 ; AVX2-NEXT: andq $-32, %rsp
105 ; AVX2-NEXT: subq $64, %rsp
106 ; AVX2-NEXT: vmovaps %ymm0, (%rsp)
107 ; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
108 ; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
109 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
110 ; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
111 ; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
112 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
113 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
114 ; AVX2-NEXT: movq %rbp, %rsp
115 ; AVX2-NEXT: popq %rbp
116 ; AVX2-NEXT: retq
117 %x0 = extractelement <4 x i64> %x, i64 %i0
118 %x1 = extractelement <4 x i64> %x, i64 %i1
119 %x2 = extractelement <4 x i64> %x, i64 %i2
120 %x3 = extractelement <4 x i64> %x, i64 %i3
121 %r0 = insertelement <4 x i64> undef, i64 %x0, i32 0
122 %r1 = insertelement <4 x i64> %r0, i64 %x1, i32 1
123 %r2 = insertelement <4 x i64> %r1, i64 %x2, i32 2
124 %r3 = insertelement <4 x i64> %r2, i64 %x3, i32 3
125 ret <4 x i64> %r3
126 }
127
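; The two zero lanes fold into a single vpxor that supplies the entire upper
; 128-bit half of the result.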
128 define <4 x i64> @var_shuffle_v4i64_v4i64_xx00_i64(<4 x i64> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
129 ; AVX1-LABEL: var_shuffle_v4i64_v4i64_xx00_i64:
130 ; AVX1: # BB#0:
131 ; AVX1-NEXT: pushq %rbp
132 ; AVX1-NEXT: movq %rsp, %rbp
133 ; AVX1-NEXT: andq $-32, %rsp
134 ; AVX1-NEXT: subq $64, %rsp
135 ; AVX1-NEXT: vmovaps %ymm0, (%rsp)
136 ; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
137 ; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
138 ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
139 ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
140 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
141 ; AVX1-NEXT: movq %rbp, %rsp
142 ; AVX1-NEXT: popq %rbp
143 ; AVX1-NEXT: retq
144 ;
145 ; AVX2-LABEL: var_shuffle_v4i64_v4i64_xx00_i64:
146 ; AVX2: # BB#0:
147 ; AVX2-NEXT: pushq %rbp
148 ; AVX2-NEXT: movq %rsp, %rbp
149 ; AVX2-NEXT: andq $-32, %rsp
150 ; AVX2-NEXT: subq $64, %rsp
151 ; AVX2-NEXT: vmovaps %ymm0, (%rsp)
152 ; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
153 ; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
154 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
155 ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
156 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
157 ; AVX2-NEXT: movq %rbp, %rsp
158 ; AVX2-NEXT: popq %rbp
159 ; AVX2-NEXT: retq
160 %x0 = extractelement <4 x i64> %x, i64 %i0
161 %x1 = extractelement <4 x i64> %x, i64 %i1
162 %x2 = extractelement <4 x i64> %x, i64 %i2
163 %x3 = extractelement <4 x i64> %x, i64 %i3
164 %r0 = insertelement <4 x i64> undef, i64 %x0, i32 0
165 %r1 = insertelement <4 x i64> %r0, i64 %x1, i32 1
166 %r2 = insertelement <4 x i64> %r1, i64 0, i32 2
167 %r3 = insertelement <4 x i64> %r2, i64 0, i32 3
168 ret <4 x i64> %r3
169 }
170
171 define <4 x i64> @var_shuffle_v4i64_v2i64_xxxx_i64(<2 x i64> %x, i64 %i0, i64 %i1, i64 %i2, i64 %i3) nounwind {
172 ; AVX1-LABEL: var_shuffle_v4i64_v2i64_xxxx_i64:
173 ; AVX1: # BB#0:
174 ; AVX1-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
175 ; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
176 ; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
177 ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
178 ; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
179 ; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
180 ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
181 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
182 ; AVX1-NEXT: retq
183 ;
184 ; AVX2-LABEL: var_shuffle_v4i64_v2i64_xxxx_i64:
185 ; AVX2: # BB#0:
186 ; AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
187 ; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
188 ; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
189 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
190 ; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
191 ; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
192 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
193 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
194 ; AVX2-NEXT: retq
195 %x0 = extractelement <2 x i64> %x, i64 %i0
196 %x1 = extractelement <2 x i64> %x, i64 %i1
197 %x2 = extractelement <2 x i64> %x, i64 %i2
198 %x3 = extractelement <2 x i64> %x, i64 %i3
199 %r0 = insertelement <4 x i64> undef, i64 %x0, i32 0
200 %r1 = insertelement <4 x i64> %r0, i64 %x1, i32 1
201 %r2 = insertelement <4 x i64> %r1, i64 %x2, i32 2
202 %r3 = insertelement <4 x i64> %r2, i64 %x3, i32 3
203 ret <4 x i64> %r3
204 }
205
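; AVX2 avoids the spill entirely: each index becomes a vpermps control via
; vmovd, lane 0 of each permute holds the selected element, and the results
; are reassembled with vinsertps/vinsertf128. AVX1 still goes through memory.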
206 define <8 x float> @var_shuffle_v8f32_v8f32_xxxxxxxx_i32(<8 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7) nounwind {
207 ; AVX1-LABEL: var_shuffle_v8f32_v8f32_xxxxxxxx_i32:
208 ; AVX1: # BB#0:
209 ; AVX1-NEXT: pushq %rbp
210 ; AVX1-NEXT: movq %rsp, %rbp
211 ; AVX1-NEXT: andq $-32, %rsp
212 ; AVX1-NEXT: subq $64, %rsp
213 ; AVX1-NEXT: movslq %edi, %rax
214 ; AVX1-NEXT: movslq %esi, %rsi
215 ; AVX1-NEXT: movslq %edx, %rdx
216 ; AVX1-NEXT: movslq %ecx, %r11
217 ; AVX1-NEXT: movslq %r8d, %r10
218 ; AVX1-NEXT: vmovaps %ymm0, (%rsp)
219 ; AVX1-NEXT: movslq %r9d, %r8
220 ; AVX1-NEXT: movslq 16(%rbp), %rdi
221 ; AVX1-NEXT: movslq 24(%rbp), %rcx
222 ; AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
223 ; AVX1-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
224 ; AVX1-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
225 ; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[2,3]
226 ; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],mem[0],xmm2[3]
227 ; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],mem[0]
228 ; AVX1-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
229 ; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],mem[0],xmm3[2,3]
230 ; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm3[0,1],xmm0[0],xmm3[3]
231 ; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
232 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
233 ; AVX1-NEXT: movq %rbp, %rsp
234 ; AVX1-NEXT: popq %rbp
235 ; AVX1-NEXT: retq
236 ;
237 ; AVX2-LABEL: var_shuffle_v8f32_v8f32_xxxxxxxx_i32:
238 ; AVX2: # BB#0:
239 ; AVX2-NEXT: vmovd %edi, %xmm1
240 ; AVX2-NEXT: vpermps %ymm0, %ymm1, %ymm1
241 ; AVX2-NEXT: vmovd %esi, %xmm2
242 ; AVX2-NEXT: vpermps %ymm0, %ymm2, %ymm2
243 ; AVX2-NEXT: vmovd %edx, %xmm3
244 ; AVX2-NEXT: vpermps %ymm0, %ymm3, %ymm3
245 ; AVX2-NEXT: vmovd %ecx, %xmm4
246 ; AVX2-NEXT: vpermps %ymm0, %ymm4, %ymm4
247 ; AVX2-NEXT: vmovd %r8d, %xmm5
248 ; AVX2-NEXT: vpermps %ymm0, %ymm5, %ymm5
249 ; AVX2-NEXT: vmovd %r9d, %xmm6
250 ; AVX2-NEXT: vpermps %ymm0, %ymm6, %ymm6
251 ; AVX2-NEXT: vmovd {{.*#+}} xmm7 = mem[0],zero,zero,zero
252 ; AVX2-NEXT: vpermps %ymm0, %ymm7, %ymm7
253 ; AVX2-NEXT: vmovd {{.*#+}} xmm8 = mem[0],zero,zero,zero
254 ; AVX2-NEXT: vpermps %ymm0, %ymm8, %ymm0
255 ; AVX2-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[2,3]
256 ; AVX2-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0,1],xmm7[0],xmm5[3]
257 ; AVX2-NEXT: vinsertps {{.*#+}} xmm0 = xmm5[0,1,2],xmm0[0]
258 ; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[2,3]
259 ; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],xmm3[0],xmm1[3]
260 ; AVX2-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm4[0]
261 ; AVX2-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
262 ; AVX2-NEXT: retq
263 %x0 = extractelement <8 x float> %x, i32 %i0
264 %x1 = extractelement <8 x float> %x, i32 %i1
265 %x2 = extractelement <8 x float> %x, i32 %i2
266 %x3 = extractelement <8 x float> %x, i32 %i3
267 %x4 = extractelement <8 x float> %x, i32 %i4
268 %x5 = extractelement <8 x float> %x, i32 %i5
269 %x6 = extractelement <8 x float> %x, i32 %i6
270 %x7 = extractelement <8 x float> %x, i32 %i7
271 %r0 = insertelement <8 x float> undef, float %x0, i32 0
272 %r1 = insertelement <8 x float> %r0, float %x1, i32 1
273 %r2 = insertelement <8 x float> %r1, float %x2, i32 2
274 %r3 = insertelement <8 x float> %r2, float %x3, i32 3
275 %r4 = insertelement <8 x float> %r3, float %x4, i32 4
276 %r5 = insertelement <8 x float> %r4, float %x5, i32 5
277 %r6 = insertelement <8 x float> %r5, float %x6, i32 6
278 %r7 = insertelement <8 x float> %r6, float %x7, i32 7
279 ret <8 x float> %r7
280 }
281
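; With only a 128-bit source, AVX1 and AVX2 produce the same spill-and-insert
; sequence, so a single ALL check covers both.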
282 define <8 x float> @var_shuffle_v8f32_v4f32_xxxxxxxx_i32(<4 x float> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7) nounwind {
283 ; ALL-LABEL: var_shuffle_v8f32_v4f32_xxxxxxxx_i32:
284 ; ALL: # BB#0:
285 ; ALL-NEXT: movslq %edi, %rax
286 ; ALL-NEXT: movslq %esi, %rsi
287 ; ALL-NEXT: movslq %edx, %rdx
288 ; ALL-NEXT: movslq %ecx, %r11
289 ; ALL-NEXT: movslq %r8d, %r10
290 ; ALL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
291 ; ALL-NEXT: movslq %r9d, %r8
292 ; ALL-NEXT: movslq {{[0-9]+}}(%rsp), %rdi
293 ; ALL-NEXT: movslq {{[0-9]+}}(%rsp), %rcx
294 ; ALL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
295 ; ALL-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
296 ; ALL-NEXT: vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
297 ; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[2,3]
298 ; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],mem[0],xmm2[3]
299 ; ALL-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],mem[0]
300 ; ALL-NEXT: vmovss {{.*#+}} xmm3 = mem[0],zero,zero,zero
301 ; ALL-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0],mem[0],xmm3[2,3]
302 ; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm3[0,1],xmm0[0],xmm3[3]
303 ; ALL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
304 ; ALL-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
305 ; ALL-NEXT: retq
306 %x0 = extractelement <4 x float> %x, i32 %i0
307 %x1 = extractelement <4 x float> %x, i32 %i1
308 %x2 = extractelement <4 x float> %x, i32 %i2
309 %x3 = extractelement <4 x float> %x, i32 %i3
310 %x4 = extractelement <4 x float> %x, i32 %i4
311 %x5 = extractelement <4 x float> %x, i32 %i5
312 %x6 = extractelement <4 x float> %x, i32 %i6
313 %x7 = extractelement <4 x float> %x, i32 %i7
314 %r0 = insertelement <8 x float> undef, float %x0, i32 0
315 %r1 = insertelement <8 x float> %r0, float %x1, i32 1
316 %r2 = insertelement <8 x float> %r1, float %x2, i32 2
317 %r3 = insertelement <8 x float> %r2, float %x3, i32 3
318 %r4 = insertelement <8 x float> %r3, float %x4, i32 4
319 %r5 = insertelement <8 x float> %r4, float %x5, i32 5
320 %r6 = insertelement <8 x float> %r5, float %x6, i32 6
321 %r7 = insertelement <8 x float> %r6, float %x7, i32 7
322 ret <8 x float> %r7
323 }
324
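; Only the first six indices arrive in registers; %i6 through %i15 are read
; from the caller's stack (16(%rbp) through 88(%rbp)) before indexing into
; the spilled vector.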
325 define <16 x i16> @var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16(<16 x i16> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i32 %i10, i32 %i11, i32 %i12, i32 %i13, i32 %i14, i32 %i15) nounwind {
326 ; AVX1-LABEL: var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16:
327 ; AVX1: # BB#0:
328 ; AVX1-NEXT: pushq %rbp
329 ; AVX1-NEXT: movq %rsp, %rbp
330 ; AVX1-NEXT: andq $-32, %rsp
331 ; AVX1-NEXT: subq $64, %rsp
332 ; AVX1-NEXT: vmovaps %ymm0, (%rsp)
333 ; AVX1-NEXT: movslq 32(%rbp), %rax
334 ; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
335 ; AVX1-NEXT: vmovd %eax, %xmm0
336 ; AVX1-NEXT: movslq 40(%rbp), %rax
337 ; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
338 ; AVX1-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
339 ; AVX1-NEXT: movslq 48(%rbp), %rax
340 ; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
341 ; AVX1-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
342 ; AVX1-NEXT: movslq 56(%rbp), %rax
343 ; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
344 ; AVX1-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
345 ; AVX1-NEXT: movslq 64(%rbp), %rax
346 ; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
347 ; AVX1-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
348 ; AVX1-NEXT: movslq 72(%rbp), %rax
349 ; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
350 ; AVX1-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
351 ; AVX1-NEXT: movslq 80(%rbp), %rax
352 ; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
353 ; AVX1-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
354 ; AVX1-NEXT: movslq 88(%rbp), %rax
355 ; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
356 ; AVX1-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
357 ; AVX1-NEXT: movslq %edi, %rax
358 ; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
359 ; AVX1-NEXT: vmovd %eax, %xmm1
360 ; AVX1-NEXT: movslq %esi, %rax
361 ; AVX1-NEXT: vpinsrw $1, (%rsp,%rax,2), %xmm1, %xmm1
362 ; AVX1-NEXT: movslq %edx, %rax
363 ; AVX1-NEXT: vpinsrw $2, (%rsp,%rax,2), %xmm1, %xmm1
364 ; AVX1-NEXT: movslq %ecx, %rax
365 ; AVX1-NEXT: vpinsrw $3, (%rsp,%rax,2), %xmm1, %xmm1
366 ; AVX1-NEXT: movslq %r8d, %rax
367 ; AVX1-NEXT: vpinsrw $4, (%rsp,%rax,2), %xmm1, %xmm1
368 ; AVX1-NEXT: movslq %r9d, %rax
369 ; AVX1-NEXT: vpinsrw $5, (%rsp,%rax,2), %xmm1, %xmm1
370 ; AVX1-NEXT: movslq 16(%rbp), %rax
371 ; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
372 ; AVX1-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
373 ; AVX1-NEXT: movslq 24(%rbp), %rax
374 ; AVX1-NEXT: movzwl (%rsp,%rax,2), %eax
375 ; AVX1-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
376 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
377 ; AVX1-NEXT: movq %rbp, %rsp
378 ; AVX1-NEXT: popq %rbp
379 ; AVX1-NEXT: retq
380 ;
381 ; AVX2-LABEL: var_shuffle_v16i16_v16i16_xxxxxxxxxxxxxxxx_i16:
382 ; AVX2: # BB#0:
383 ; AVX2-NEXT: pushq %rbp
384 ; AVX2-NEXT: movq %rsp, %rbp
385 ; AVX2-NEXT: andq $-32, %rsp
386 ; AVX2-NEXT: subq $64, %rsp
387 ; AVX2-NEXT: vmovaps %ymm0, (%rsp)
388 ; AVX2-NEXT: movslq 32(%rbp), %rax
389 ; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
390 ; AVX2-NEXT: vmovd %eax, %xmm0
391 ; AVX2-NEXT: movslq 40(%rbp), %rax
392 ; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
393 ; AVX2-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
394 ; AVX2-NEXT: movslq 48(%rbp), %rax
395 ; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
396 ; AVX2-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
397 ; AVX2-NEXT: movslq 56(%rbp), %rax
398 ; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
399 ; AVX2-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
400 ; AVX2-NEXT: movslq 64(%rbp), %rax
401 ; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
402 ; AVX2-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
403 ; AVX2-NEXT: movslq 72(%rbp), %rax
404 ; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
405 ; AVX2-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
406 ; AVX2-NEXT: movslq 80(%rbp), %rax
407 ; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
408 ; AVX2-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
409 ; AVX2-NEXT: movslq 88(%rbp), %rax
410 ; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
411 ; AVX2-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
412 ; AVX2-NEXT: movslq %edi, %rax
413 ; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
414 ; AVX2-NEXT: vmovd %eax, %xmm1
415 ; AVX2-NEXT: movslq %esi, %rax
416 ; AVX2-NEXT: vpinsrw $1, (%rsp,%rax,2), %xmm1, %xmm1
417 ; AVX2-NEXT: movslq %edx, %rax
418 ; AVX2-NEXT: vpinsrw $2, (%rsp,%rax,2), %xmm1, %xmm1
419 ; AVX2-NEXT: movslq %ecx, %rax
420 ; AVX2-NEXT: vpinsrw $3, (%rsp,%rax,2), %xmm1, %xmm1
421 ; AVX2-NEXT: movslq %r8d, %rax
422 ; AVX2-NEXT: vpinsrw $4, (%rsp,%rax,2), %xmm1, %xmm1
423 ; AVX2-NEXT: movslq %r9d, %rax
424 ; AVX2-NEXT: vpinsrw $5, (%rsp,%rax,2), %xmm1, %xmm1
425 ; AVX2-NEXT: movslq 16(%rbp), %rax
426 ; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
427 ; AVX2-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
428 ; AVX2-NEXT: movslq 24(%rbp), %rax
429 ; AVX2-NEXT: movzwl (%rsp,%rax,2), %eax
430 ; AVX2-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
431 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
432 ; AVX2-NEXT: movq %rbp, %rsp
433 ; AVX2-NEXT: popq %rbp
434 ; AVX2-NEXT: retq
435 %x0 = extractelement <16 x i16> %x, i32 %i0
436 %x1 = extractelement <16 x i16> %x, i32 %i1
437 %x2 = extractelement <16 x i16> %x, i32 %i2
438 %x3 = extractelement <16 x i16> %x, i32 %i3
439 %x4 = extractelement <16 x i16> %x, i32 %i4
440 %x5 = extractelement <16 x i16> %x, i32 %i5
441 %x6 = extractelement <16 x i16> %x, i32 %i6
442 %x7 = extractelement <16 x i16> %x, i32 %i7
443 %x8 = extractelement <16 x i16> %x, i32 %i8
444 %x9 = extractelement <16 x i16> %x, i32 %i9
445 %x10 = extractelement <16 x i16> %x, i32 %i10
446 %x11 = extractelement <16 x i16> %x, i32 %i11
447 %x12 = extractelement <16 x i16> %x, i32 %i12
448 %x13 = extractelement <16 x i16> %x, i32 %i13
449 %x14 = extractelement <16 x i16> %x, i32 %i14
450 %x15 = extractelement <16 x i16> %x, i32 %i15
451 %r0 = insertelement <16 x i16> undef, i16 %x0 , i32 0
452 %r1 = insertelement <16 x i16> %r0 , i16 %x1 , i32 1
453 %r2 = insertelement <16 x i16> %r1 , i16 %x2 , i32 2
454 %r3 = insertelement <16 x i16> %r2 , i16 %x3 , i32 3
455 %r4 = insertelement <16 x i16> %r3 , i16 %x4 , i32 4
456 %r5 = insertelement <16 x i16> %r4 , i16 %x5 , i32 5
457 %r6 = insertelement <16 x i16> %r5 , i16 %x6 , i32 6
458 %r7 = insertelement <16 x i16> %r6 , i16 %x7 , i32 7
459 %r8 = insertelement <16 x i16> %r7 , i16 %x8 , i32 8
460 %r9 = insertelement <16 x i16> %r8 , i16 %x9 , i32 9
461 %r10 = insertelement <16 x i16> %r9 , i16 %x10, i32 10
462 %r11 = insertelement <16 x i16> %r10, i16 %x11, i32 11
463 %r12 = insertelement <16 x i16> %r11, i16 %x12, i32 12
464 %r13 = insertelement <16 x i16> %r12, i16 %x13, i32 13
465 %r14 = insertelement <16 x i16> %r13, i16 %x14, i32 14
466 %r15 = insertelement <16 x i16> %r14, i16 %x15, i32 15
467 ret <16 x i16> %r15
468 }
469
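; Same pattern with a 128-bit source: the spill sits in the red zone and the
; stack-passed indices show up as {{[0-9]+}}(%rsp) loads.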
470 define <16 x i16> @var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16(<8 x i16> %x, i32 %i0, i32 %i1, i32 %i2, i32 %i3, i32 %i4, i32 %i5, i32 %i6, i32 %i7, i32 %i8, i32 %i9, i32 %i10, i32 %i11, i32 %i12, i32 %i13, i32 %i14, i32 %i15) nounwind {
471 ; AVX1-LABEL: var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16:
472 ; AVX1: # BB#0:
473 ; AVX1-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
474 ; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
475 ; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
476 ; AVX1-NEXT: vmovd %eax, %xmm0
477 ; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
478 ; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
479 ; AVX1-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
480 ; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
481 ; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
482 ; AVX1-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
483 ; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
484 ; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
485 ; AVX1-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
486 ; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
487 ; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
488 ; AVX1-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
489 ; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
490 ; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
491 ; AVX1-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
492 ; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
493 ; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
494 ; AVX1-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
495 ; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
496 ; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
497 ; AVX1-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
498 ; AVX1-NEXT: movslq %edi, %rax
499 ; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
500 ; AVX1-NEXT: vmovd %eax, %xmm1
501 ; AVX1-NEXT: movslq %esi, %rax
502 ; AVX1-NEXT: vpinsrw $1, -24(%rsp,%rax,2), %xmm1, %xmm1
503 ; AVX1-NEXT: movslq %edx, %rax
504 ; AVX1-NEXT: vpinsrw $2, -24(%rsp,%rax,2), %xmm1, %xmm1
505 ; AVX1-NEXT: movslq %ecx, %rax
506 ; AVX1-NEXT: vpinsrw $3, -24(%rsp,%rax,2), %xmm1, %xmm1
507 ; AVX1-NEXT: movslq %r8d, %rax
508 ; AVX1-NEXT: vpinsrw $4, -24(%rsp,%rax,2), %xmm1, %xmm1
509 ; AVX1-NEXT: movslq %r9d, %rax
510 ; AVX1-NEXT: vpinsrw $5, -24(%rsp,%rax,2), %xmm1, %xmm1
511 ; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
512 ; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
513 ; AVX1-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
514 ; AVX1-NEXT: movslq {{[0-9]+}}(%rsp), %rax
515 ; AVX1-NEXT: movzwl -24(%rsp,%rax,2), %eax
516 ; AVX1-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
517 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
518 ; AVX1-NEXT: retq
519 ;
520 ; AVX2-LABEL: var_shuffle_v16i16_v8i16_xxxxxxxxxxxxxxxx_i16:
521 ; AVX2: # BB#0:
522 ; AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
523 ; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
524 ; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
525 ; AVX2-NEXT: vmovd %eax, %xmm0
526 ; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
527 ; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
528 ; AVX2-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
529 ; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
530 ; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
531 ; AVX2-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
532 ; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
533 ; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
534 ; AVX2-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
535 ; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
536 ; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
537 ; AVX2-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
538 ; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
539 ; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
540 ; AVX2-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
541 ; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
542 ; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
543 ; AVX2-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
544 ; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
545 ; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
546 ; AVX2-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
547 ; AVX2-NEXT: movslq %edi, %rax
548 ; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
549 ; AVX2-NEXT: vmovd %eax, %xmm1
550 ; AVX2-NEXT: movslq %esi, %rax
551 ; AVX2-NEXT: vpinsrw $1, -24(%rsp,%rax,2), %xmm1, %xmm1
552 ; AVX2-NEXT: movslq %edx, %rax
553 ; AVX2-NEXT: vpinsrw $2, -24(%rsp,%rax,2), %xmm1, %xmm1
554 ; AVX2-NEXT: movslq %ecx, %rax
555 ; AVX2-NEXT: vpinsrw $3, -24(%rsp,%rax,2), %xmm1, %xmm1
556 ; AVX2-NEXT: movslq %r8d, %rax
557 ; AVX2-NEXT: vpinsrw $4, -24(%rsp,%rax,2), %xmm1, %xmm1
558 ; AVX2-NEXT: movslq %r9d, %rax
559 ; AVX2-NEXT: vpinsrw $5, -24(%rsp,%rax,2), %xmm1, %xmm1
560 ; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
561 ; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
562 ; AVX2-NEXT: vpinsrw $6, %eax, %xmm1, %xmm1
563 ; AVX2-NEXT: movslq {{[0-9]+}}(%rsp), %rax
564 ; AVX2-NEXT: movzwl -24(%rsp,%rax,2), %eax
565 ; AVX2-NEXT: vpinsrw $7, %eax, %xmm1, %xmm1
566 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
567 ; AVX2-NEXT: retq
568 %x0 = extractelement <8 x i16> %x, i32 %i0
569 %x1 = extractelement <8 x i16> %x, i32 %i1
570 %x2 = extractelement <8 x i16> %x, i32 %i2
571 %x3 = extractelement <8 x i16> %x, i32 %i3
572 %x4 = extractelement <8 x i16> %x, i32 %i4
573 %x5 = extractelement <8 x i16> %x, i32 %i5
574 %x6 = extractelement <8 x i16> %x, i32 %i6
575 %x7 = extractelement <8 x i16> %x, i32 %i7
576 %x8 = extractelement <8 x i16> %x, i32 %i8
577 %x9 = extractelement <8 x i16> %x, i32 %i9
578 %x10 = extractelement <8 x i16> %x, i32 %i10
579 %x11 = extractelement <8 x i16> %x, i32 %i11
580 %x12 = extractelement <8 x i16> %x, i32 %i12
581 %x13 = extractelement <8 x i16> %x, i32 %i13
582 %x14 = extractelement <8 x i16> %x, i32 %i14
583 %x15 = extractelement <8 x i16> %x, i32 %i15
584 %r0 = insertelement <16 x i16> undef, i16 %x0 , i32 0
585 %r1 = insertelement <16 x i16> %r0 , i16 %x1 , i32 1
586 %r2 = insertelement <16 x i16> %r1 , i16 %x2 , i32 2
587 %r3 = insertelement <16 x i16> %r2 , i16 %x3 , i32 3
588 %r4 = insertelement <16 x i16> %r3 , i16 %x4 , i32 4
589 %r5 = insertelement <16 x i16> %r4 , i16 %x5 , i32 5
590 %r6 = insertelement <16 x i16> %r5 , i16 %x6 , i32 6
591 %r7 = insertelement <16 x i16> %r6 , i16 %x7 , i32 7
592 %r8 = insertelement <16 x i16> %r7 , i16 %x8 , i32 8
593 %r9 = insertelement <16 x i16> %r8 , i16 %x9 , i32 9
594 %r10 = insertelement <16 x i16> %r9 , i16 %x10, i32 10
595 %r11 = insertelement <16 x i16> %r10, i16 %x11, i32 11
596 %r12 = insertelement <16 x i16> %r11, i16 %x12, i32 12
597 %r13 = insertelement <16 x i16> %r12, i16 %x13, i32 13
598 %r14 = insertelement <16 x i16> %r13, i16 %x14, i32 14
599 %r15 = insertelement <16 x i16> %r14, i16 %x15, i32 15
600 ret <16 x i16> %r15
601 }
602
603 ;
604 ; Unary shuffle indices from memory
605 ;
606
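; The four indices are first loaded from consecutive i64 slots off %rdi; the
; extract/insert lowering is then the same as for the register-index cases
; above.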
607 define <4 x i64> @mem_shuffle_v4i64_v4i64_xxxx_i64(<4 x i64> %x, i64* %i) nounwind {
608 ; AVX1-LABEL: mem_shuffle_v4i64_v4i64_xxxx_i64:
609 ; AVX1: # BB#0:
610 ; AVX1-NEXT: pushq %rbp
611 ; AVX1-NEXT: movq %rsp, %rbp
612 ; AVX1-NEXT: andq $-32, %rsp
613 ; AVX1-NEXT: subq $64, %rsp
614 ; AVX1-NEXT: movq (%rdi), %rax
615 ; AVX1-NEXT: movq 8(%rdi), %rcx
616 ; AVX1-NEXT: movq 16(%rdi), %rdx
617 ; AVX1-NEXT: movq 24(%rdi), %rsi
618 ; AVX1-NEXT: vmovaps %ymm0, (%rsp)
619 ; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
620 ; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
621 ; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
622 ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
623 ; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
624 ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
625 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
626 ; AVX1-NEXT: movq %rbp, %rsp
627 ; AVX1-NEXT: popq %rbp
628 ; AVX1-NEXT: retq
629 ;
630 ; AVX2-LABEL: mem_shuffle_v4i64_v4i64_xxxx_i64:
631 ; AVX2: # BB#0:
632 ; AVX2-NEXT: pushq %rbp
633 ; AVX2-NEXT: movq %rsp, %rbp
634 ; AVX2-NEXT: andq $-32, %rsp
635 ; AVX2-NEXT: subq $64, %rsp
636 ; AVX2-NEXT: movq (%rdi), %rax
637 ; AVX2-NEXT: movq 8(%rdi), %rcx
638 ; AVX2-NEXT: movq 16(%rdi), %rdx
639 ; AVX2-NEXT: movq 24(%rdi), %rsi
640 ; AVX2-NEXT: vmovaps %ymm0, (%rsp)
641 ; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
642 ; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
643 ; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
644 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
645 ; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
646 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
647 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
648 ; AVX2-NEXT: movq %rbp, %rsp
649 ; AVX2-NEXT: popq %rbp
650 ; AVX2-NEXT: retq
651 %p0 = getelementptr inbounds i64, i64* %i, i32 0
652 %p1 = getelementptr inbounds i64, i64* %i, i32 1
653 %p2 = getelementptr inbounds i64, i64* %i, i32 2
654 %p3 = getelementptr inbounds i64, i64* %i, i32 3
655 %i0 = load i64, i64* %p0, align 4
656 %i1 = load i64, i64* %p1, align 4
657 %i2 = load i64, i64* %p2, align 4
658 %i3 = load i64, i64* %p3, align 4
659 %x0 = extractelement <4 x i64> %x, i64 %i0
660 %x1 = extractelement <4 x i64> %x, i64 %i1
661 %x2 = extractelement <4 x i64> %x, i64 %i2
662 %x3 = extractelement <4 x i64> %x, i64 %i3
663 %r0 = insertelement <4 x i64> undef, i64 %x0, i32 0
664 %r1 = insertelement <4 x i64> %r0, i64 %x1, i32 1
665 %r2 = insertelement <4 x i64> %r1, i64 %x2, i32 2
666 %r3 = insertelement <4 x i64> %r2, i64 %x3, i32 3
667 ret <4 x i64> %r3
668 }
669
670 define <4 x i64> @mem_shuffle_v4i64_v2i64_xxxx_i64(<2 x i64> %x, i64* %i) nounwind {
671 ; AVX1-LABEL: mem_shuffle_v4i64_v2i64_xxxx_i64:
672 ; AVX1: # BB#0:
673 ; AVX1-NEXT: movq (%rdi), %rax
674 ; AVX1-NEXT: movq 8(%rdi), %rcx
675 ; AVX1-NEXT: movq 16(%rdi), %rdx
676 ; AVX1-NEXT: movq 24(%rdi), %rsi
677 ; AVX1-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
678 ; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
679 ; AVX1-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
680 ; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
681 ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
682 ; AVX1-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
683 ; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
684 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
685 ; AVX1-NEXT: retq
686 ;
687 ; AVX2-LABEL: mem_shuffle_v4i64_v2i64_xxxx_i64:
688 ; AVX2: # BB#0:
689 ; AVX2-NEXT: movq (%rdi), %rax
690 ; AVX2-NEXT: movq 8(%rdi), %rcx
691 ; AVX2-NEXT: movq 16(%rdi), %rdx
692 ; AVX2-NEXT: movq 24(%rdi), %rsi
693 ; AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
694 ; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
695 ; AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
696 ; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
697 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
698 ; AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
699 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
700 ; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
701 ; AVX2-NEXT: retq
702 %p0 = getelementptr inbounds i64, i64* %i, i32 0
703 %p1 = getelementptr inbounds i64, i64* %i, i32 1
704 %p2 = getelementptr inbounds i64, i64* %i, i32 2
705 %p3 = getelementptr inbounds i64, i64* %i, i32 3
706 %i0 = load i64, i64* %p0, align 4
707 %i1 = load i64, i64* %p1, align 4
708 %i2 = load i64, i64* %p2, align 4
709 %i3 = load i64, i64* %p3, align 4
710 %x0 = extractelement <2 x i64> %x, i64 %i0
711 %x1 = extractelement <2 x i64> %x, i64 %i1
712 %x2 = extractelement <2 x i64> %x, i64 %i2
713 %x3 = extractelement <2 x i64> %x, i64 %i3
714 %r0 = insertelement <4 x i64> undef, i64 %x0, i32 0
715 %r1 = insertelement <4 x i64> %r0, i64 %x1, i32 1
716 %r2 = insertelement <4 x i64> %r1, i64 %x2, i32 2
717 %r3 = insertelement <4 x i64> %r2, i64 %x3, i32 3
718 ret <4 x i64> %r3
719 }