llvm.org GIT mirror: llvm / commit 05cee0c
Convert this test to .s form.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@117708 91177308-0d34-0410-b5e6-96231b3b80d8
Owen Anderson, 9 years ago
2 changed files with 150 additions and 625 deletions.
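This commit retires the XFAIL'd IR-based encoding test, which ran llc over LLVM IR and FileChecked the encodings it printed, and replaces it with an assembly file fed straight to llvm-mc, so the encodings are checked without going through instruction selection; the instruction mnemonics and expected @ encoding bytes carry over unchanged. The RUN lines of the deleted and added files summarize the shift:

; RUN: llc -show-mc-encoding -march=arm -mcpu=cortex-a8 -mattr=+neon < %s | FileCheck %s
// RUN: llvm-mc -triple arm-unknown-unknown -show-encoding < %s | FileCheck %s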
test/MC/ARM/neon-satshift-encoding.ll (+0, -625; file deleted)
; RUN: llc -show-mc-encoding -march=arm -mcpu=cortex-a8 -mattr=+neon < %s | FileCheck %s

; XFAIL: *

define <8 x i8> @vqshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
; CHECK: vqshl.s8 d16, d16, d17 @ encoding: [0xb0,0x04,0x41,0xf2]
%tmp3 = call <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
ret <8 x i8> %tmp3
}

define <4 x i16> @vqshls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
; CHECK: vqshl.s16 d16, d16, d17 @ encoding: [0xb0,0x04,0x51,0xf2]
%tmp3 = call <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
ret <4 x i16> %tmp3
}

define <2 x i32> @vqshls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
; CHECK: vqshl.s32 d16, d16, d17 @ encoding: [0xb0,0x04,0x61,0xf2]
%tmp3 = call <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
ret <2 x i32> %tmp3
}

define <1 x i64> @vqshls64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
; CHECK: vqshl.s64 d16, d16, d17 @ encoding: [0xb0,0x04,0x71,0xf2]
%tmp3 = call <1 x i64> @llvm.arm.neon.vqshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
ret <1 x i64> %tmp3
}

define <8 x i8> @vqshlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
; CHECK: vqshl.u8 d16, d16, d17 @ encoding: [0xb0,0x04,0x41,0xf3]
%tmp3 = call <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
ret <8 x i8> %tmp3
}

define <4 x i16> @vqshlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
; CHECK: vqshl.u16 d16, d16, d17 @ encoding: [0xb0,0x04,0x51,0xf3]
%tmp3 = call <4 x i16> @llvm.arm.neon.vqshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
ret <4 x i16> %tmp3
}

define <2 x i32> @vqshlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
; CHECK: vqshl.u32 d16, d16, d17 @ encoding: [0xb0,0x04,0x61,0xf3]
%tmp3 = call <2 x i32> @llvm.arm.neon.vqshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
ret <2 x i32> %tmp3
}

define <1 x i64> @vqshlu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
; CHECK: vqshl.u64 d16, d16, d17 @ encoding: [0xb0,0x04,0x71,0xf3]
%tmp3 = call <1 x i64> @llvm.arm.neon.vqshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
ret <1 x i64> %tmp3
}

define <16 x i8> @vqshlQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
; CHECK: vqshl.s8 q8, q8, q9 @ encoding: [0xf0,0x04,0x42,0xf2]
%tmp3 = call <16 x i8> @llvm.arm.neon.vqshifts.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
ret <16 x i8> %tmp3
}

define <8 x i16> @vqshlQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
; CHECK: vqshl.s16 q8, q8, q9 @ encoding: [0xf0,0x04,0x52,0xf2]
%tmp3 = call <8 x i16> @llvm.arm.neon.vqshifts.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
ret <8 x i16> %tmp3
}

define <4 x i32> @vqshlQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
; CHECK: vqshl.s32 q8, q8, q9 @ encoding: [0xf0,0x04,0x62,0xf2]
%tmp3 = call <4 x i32> @llvm.arm.neon.vqshifts.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
ret <4 x i32> %tmp3
}

define <2 x i64> @vqshlQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
; CHECK: vqshl.s64 q8, q8, q9 @ encoding: [0xf0,0x04,0x72,0xf2]
%tmp3 = call <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
ret <2 x i64> %tmp3
}

define <16 x i8> @vqshlQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
; CHECK: vqshl.u8 q8, q8, q9 @ encoding: [0xf0,0x04,0x42,0xf3]
%tmp3 = call <16 x i8> @llvm.arm.neon.vqshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
ret <16 x i8> %tmp3
}

define <8 x i16> @vqshlQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
; CHECK: vqshl.u16 q8, q8, q9 @ encoding: [0xf0,0x04,0x52,0xf3]
%tmp3 = call <8 x i16> @llvm.arm.neon.vqshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
ret <8 x i16> %tmp3
}

define <4 x i32> @vqshlQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
; CHECK: vqshl.u32 q8, q8, q9 @ encoding: [0xf0,0x04,0x62,0xf3]
%tmp3 = call <4 x i32> @llvm.arm.neon.vqshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
ret <4 x i32> %tmp3
}

define <2 x i64> @vqshlQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
; CHECK: vqshl.u64 q8, q8, q9 @ encoding: [0xf0,0x04,0x72,0xf3]
%tmp3 = call <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
ret <2 x i64> %tmp3
}

define <8 x i8> @vqshls_n8(<8 x i8>* %A) nounwind {
%tmp1 = load <8 x i8>* %A
; CHECK: vqshl.s8 d16, d16, #7 @ encoding: [0x30,0x07,0xcf,0xf2]
%tmp2 = call <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
ret <8 x i8> %tmp2
}

define <4 x i16> @vqshls_n16(<4 x i16>* %A) nounwind {
%tmp1 = load <4 x i16>* %A
; CHECK: vqshl.s16 d16, d16, #15 @ encoding: [0x30,0x07,0xdf,0xf2]
%tmp2 = call <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
ret <4 x i16> %tmp2
}

define <2 x i32> @vqshls_n32(<2 x i32>* %A) nounwind {
%tmp1 = load <2 x i32>* %A
; CHECK: vqshl.s32 d16, d16, #31 @ encoding: [0x30,0x07,0xff,0xf2]
%tmp2 = call <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
ret <2 x i32> %tmp2
}

define <1 x i64> @vqshls_n64(<1 x i64>* %A) nounwind {
%tmp1 = load <1 x i64>* %A
; CHECK: vqshl.s64 d16, d16, #63 @ encoding: [0xb0,0x07,0xff,0xf2]
%tmp2 = call <1 x i64> @llvm.arm.neon.vqshifts.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 63 >)
ret <1 x i64> %tmp2
}

define <8 x i8> @vqshlu_n8(<8 x i8>* %A) nounwind {
%tmp1 = load <8 x i8>* %A
; CHECK: vqshl.u8 d16, d16, #7 @ encoding: [0x30,0x07,0xcf,0xf3]
%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
ret <8 x i8> %tmp2
}

define <4 x i16> @vqshlu_n16(<4 x i16>* %A) nounwind {
%tmp1 = load <4 x i16>* %A
; CHECK: vqshl.u16 d16, d16, #15 @ encoding: [0x30,0x07,0xdf,0xf3]
%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
ret <4 x i16> %tmp2
}

define <2 x i32> @vqshlu_n32(<2 x i32>* %A) nounwind {
%tmp1 = load <2 x i32>* %A
; CHECK: vqshl.u32 d16, d16, #31 @ encoding: [0x30,0x07,0xff,0xf3]
%tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
ret <2 x i32> %tmp2
}

define <1 x i64> @vqshlu_n64(<1 x i64>* %A) nounwind {
%tmp1 = load <1 x i64>* %A
; CHECK: vqshl.u64 d16, d16, #63 @ encoding: [0xb0,0x07,0xff,0xf3]
%tmp2 = call <1 x i64> @llvm.arm.neon.vqshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 63 >)
ret <1 x i64> %tmp2
}

define <8 x i8> @vqshlsu_n8(<8 x i8>* %A) nounwind {
%tmp1 = load <8 x i8>* %A
; CHECK: vqshlu.s8 d16, d16, #7 @ encoding: [0x30,0x06,0xcf,0xf3]
%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftsu.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
ret <8 x i8> %tmp2
}

define <4 x i16> @vqshlsu_n16(<4 x i16>* %A) nounwind {
%tmp1 = load <4 x i16>* %A
; CHECK: vqshlu.s16 d16, d16, #15 @ encoding: [0x30,0x06,0xdf,0xf3]
%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftsu.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
ret <4 x i16> %tmp2
}

define <2 x i32> @vqshlsu_n32(<2 x i32>* %A) nounwind {
%tmp1 = load <2 x i32>* %A
; CHECK: vqshlu.s32 d16, d16, #31 @ encoding: [0x30,0x06,0xff,0xf3]
%tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftsu.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
ret <2 x i32> %tmp2
}

define <1 x i64> @vqshlsu_n64(<1 x i64>* %A) nounwind {
%tmp1 = load <1 x i64>* %A
; CHECK: vqshlu.s64 d16, d16, #63 @ encoding: [0xb0,0x06,0xff,0xf3]
%tmp2 = call <1 x i64> @llvm.arm.neon.vqshiftsu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 63 >)
ret <1 x i64> %tmp2
}

define <16 x i8> @vqshlQs_n8(<16 x i8>* %A) nounwind {
%tmp1 = load <16 x i8>* %A
; CHECK: vqshl.s8 q8, q8, #7 @ encoding: [0x70,0x07,0xcf,0xf2]
%tmp2 = call <16 x i8> @llvm.arm.neon.vqshifts.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
ret <16 x i8> %tmp2
}

define <8 x i16> @vqshlQs_n16(<8 x i16>* %A) nounwind {
%tmp1 = load <8 x i16>* %A
; CHECK: vqshl.s16 q8, q8, #15 @ encoding: [0x70,0x07,0xdf,0xf2]
%tmp2 = call <8 x i16> @llvm.arm.neon.vqshifts.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >)
ret <8 x i16> %tmp2
}

define <4 x i32> @vqshlQs_n32(<4 x i32>* %A) nounwind {
%tmp1 = load <4 x i32>* %A
; CHECK: vqshl.s32 q8, q8, #31 @ encoding: [0x70,0x07,0xff,0xf2]
%tmp2 = call <4 x i32> @llvm.arm.neon.vqshifts.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 31, i32 31, i32 31, i32 31 >)
ret <4 x i32> %tmp2
}

define <2 x i64> @vqshlQs_n64(<2 x i64>* %A) nounwind {
%tmp1 = load <2 x i64>* %A
; CHECK: vqshl.s64 q8, q8, #63 @ encoding: [0xf0,0x07,0xff,0xf2]
%tmp2 = call <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
ret <2 x i64> %tmp2
}

define <16 x i8> @vqshlQu_n8(<16 x i8>* %A) nounwind {
%tmp1 = load <16 x i8>* %A
; CHECK: vqshl.u8 q8, q8, #7 @ encoding: [0x70,0x07,0xcf,0xf3]
%tmp2 = call <16 x i8> @llvm.arm.neon.vqshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
ret <16 x i8> %tmp2
}

define <8 x i16> @vqshlQu_n16(<8 x i16>* %A) nounwind {
%tmp1 = load <8 x i16>* %A
; CHECK: vqshl.u16 q8, q8, #15 @ encoding: [0x70,0x07,0xdf,0xf3]
%tmp2 = call <8 x i16> @llvm.arm.neon.vqshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >)
ret <8 x i16> %tmp2
}

define <4 x i32> @vqshlQu_n32(<4 x i32>* %A) nounwind {
%tmp1 = load <4 x i32>* %A
; CHECK: vqshl.u32 q8, q8, #31 @ encoding: [0x70,0x07,0xff,0xf3]
%tmp2 = call <4 x i32> @llvm.arm.neon.vqshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 31, i32 31, i32 31, i32 31 >)
ret <4 x i32> %tmp2
}

define <2 x i64> @vqshlQu_n64(<2 x i64>* %A) nounwind {
%tmp1 = load <2 x i64>* %A
; CHECK: vqshl.u64 q8, q8, #63 @ encoding: [0xf0,0x07,0xff,0xf3]
%tmp2 = call <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
ret <2 x i64> %tmp2
}

define <16 x i8> @vqshlQsu_n8(<16 x i8>* %A) nounwind {
%tmp1 = load <16 x i8>* %A
; CHECK: vqshlu.s8 q8, q8, #7 @ encoding: [0x70,0x06,0xcf,0xf3]
%tmp2 = call <16 x i8> @llvm.arm.neon.vqshiftsu.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
ret <16 x i8> %tmp2
}

define <8 x i16> @vqshlQsu_n16(<8 x i16>* %A) nounwind {
%tmp1 = load <8 x i16>* %A
; CHECK: vqshlu.s16 q8, q8, #15 @ encoding: [0x70,0x06,0xdf,0xf3]
%tmp2 = call <8 x i16> @llvm.arm.neon.vqshiftsu.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >)
ret <8 x i16> %tmp2
}

define <4 x i32> @vqshlQsu_n32(<4 x i32>* %A) nounwind {
%tmp1 = load <4 x i32>* %A
; CHECK: vqshlu.s32 q8, q8, #31 @ encoding: [0x70,0x06,0xff,0xf3]
%tmp2 = call <4 x i32> @llvm.arm.neon.vqshiftsu.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 31, i32 31, i32 31, i32 31 >)
ret <4 x i32> %tmp2
}

define <2 x i64> @vqshlQsu_n64(<2 x i64>* %A) nounwind {
%tmp1 = load <2 x i64>* %A
; CHECK: vqshlu.s64 q8, q8, #63 @ encoding: [0xf0,0x06,0xff,0xf3]
%tmp2 = call <2 x i64> @llvm.arm.neon.vqshiftsu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
ret <2 x i64> %tmp2
}

declare <8 x i8> @llvm.arm.neon.vqshifts.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vqshifts.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vqshifts.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vqshifts.v1i64(<1 x i64>, <1 x i64>) nounwind readnone

declare <8 x i8> @llvm.arm.neon.vqshiftu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vqshiftu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vqshiftu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vqshiftu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone

declare <8 x i8> @llvm.arm.neon.vqshiftsu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vqshiftsu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vqshiftsu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vqshiftsu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vqshifts.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vqshifts.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vqshifts.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vqshiftu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vqshiftu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vqshiftu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vqshiftsu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vqshiftsu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vqshiftsu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vqshiftsu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone

define <8 x i8> @vqrshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
; CHECK: vqrshl.s8 d16, d16, d17 @ encoding: [0xb0,0x05,0x41,0xf2]
%tmp3 = call <8 x i8> @llvm.arm.neon.vqrshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
ret <8 x i8> %tmp3
}

define <4 x i16> @vqrshls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
; CHECK: vqrshl.s16 d16, d16, d17 @ encoding: [0xb0,0x05,0x51,0xf2]
%tmp3 = call <4 x i16> @llvm.arm.neon.vqrshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
ret <4 x i16> %tmp3
}

define <2 x i32> @vqrshls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
; CHECK: vqrshl.s32 d16, d16, d17 @ encoding: [0xb0,0x05,0x61,0xf2]
%tmp3 = call <2 x i32> @llvm.arm.neon.vqrshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
ret <2 x i32> %tmp3
}

define <1 x i64> @vqrshls64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
; CHECK: vqrshl.s64 d16, d16, d17 @ encoding: [0xb0,0x05,0x71,0xf2]
%tmp3 = call <1 x i64> @llvm.arm.neon.vqrshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
ret <1 x i64> %tmp3
}

define <8 x i8> @vqrshlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
; CHECK: vqrshl.u8 d16, d16, d17 @ encoding: [0xb0,0x05,0x41,0xf3]
%tmp3 = call <8 x i8> @llvm.arm.neon.vqrshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
ret <8 x i8> %tmp3
}

define <4 x i16> @vqrshlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
; CHECK: vqrshl.u16 d16, d16, d17 @ encoding: [0xb0,0x05,0x51,0xf3]
%tmp3 = call <4 x i16> @llvm.arm.neon.vqrshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
ret <4 x i16> %tmp3
}

define <2 x i32> @vqrshlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
; CHECK: vqrshl.u32 d16, d16, d17 @ encoding: [0xb0,0x05,0x61,0xf3]
%tmp3 = call <2 x i32> @llvm.arm.neon.vqrshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
ret <2 x i32> %tmp3
}

define <1 x i64> @vqrshlu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
; CHECK: vqrshl.u64 d16, d16, d17 @ encoding: [0xb0,0x05,0x71,0xf3]
%tmp3 = call <1 x i64> @llvm.arm.neon.vqrshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
ret <1 x i64> %tmp3
}

define <16 x i8> @vqrshlQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
; CHECK: vqrshl.s8 q8, q8, q9 @ encoding: [0xf0,0x05,0x42,0xf2]
%tmp3 = call <16 x i8> @llvm.arm.neon.vqrshifts.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
ret <16 x i8> %tmp3
}

define <8 x i16> @vqrshlQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
; CHECK: vqrshl.s16 q8, q8, q9 @ encoding: [0xf0,0x05,0x52,0xf2]
%tmp3 = call <8 x i16> @llvm.arm.neon.vqrshifts.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
ret <8 x i16> %tmp3
}

define <4 x i32> @vqrshlQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
; CHECK: vqrshl.s32 q8, q8, q9 @ encoding: [0xf0,0x05,0x62,0xf2]
%tmp3 = call <4 x i32> @llvm.arm.neon.vqrshifts.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
ret <4 x i32> %tmp3
}

define <2 x i64> @vqrshlQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
; CHECK: vqrshl.s64 q8, q8, q9 @ encoding: [0xf0,0x05,0x72,0xf2]
%tmp3 = call <2 x i64> @llvm.arm.neon.vqrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
ret <2 x i64> %tmp3
}

define <16 x i8> @vqrshlQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
; CHECK: vqrshl.u8 q8, q8, q9 @ encoding: [0xf0,0x05,0x42,0xf3]
%tmp3 = call <16 x i8> @llvm.arm.neon.vqrshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
ret <16 x i8> %tmp3
}

define <8 x i16> @vqrshlQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
; CHECK: vqrshl.u16 q8, q8, q9 @ encoding: [0xf0,0x05,0x52,0xf3]
%tmp3 = call <8 x i16> @llvm.arm.neon.vqrshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
ret <8 x i16> %tmp3
}

define <4 x i32> @vqrshlQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
; CHECK: vqrshl.u32 q8, q8, q9 @ encoding: [0xf0,0x05,0x62,0xf3]
%tmp3 = call <4 x i32> @llvm.arm.neon.vqrshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
ret <4 x i32> %tmp3
}

define <2 x i64> @vqrshlQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
; CHECK: vqrshl.u64 q8, q8, q9 @ encoding: [0xf0,0x05,0x72,0xf3]
%tmp3 = call <2 x i64> @llvm.arm.neon.vqrshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
ret <2 x i64> %tmp3
}

declare <8 x i8> @llvm.arm.neon.vqrshifts.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vqrshifts.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vqrshifts.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vqrshifts.v1i64(<1 x i64>, <1 x i64>) nounwind readnone

declare <8 x i8> @llvm.arm.neon.vqrshiftu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vqrshiftu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vqrshiftu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vqrshiftu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vqrshifts.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vqrshifts.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vqrshifts.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vqrshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vqrshiftu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vqrshiftu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vqrshiftu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vqrshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone

define <8 x i8> @vqshrns8(<8 x i16>* %A) nounwind {
%tmp1 = load <8 x i16>* %A
; CHECK: vqshrn.s16 d16, q8, #8 @ encoding: [0x30,0x09,0xc8,0xf2]
%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftns.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
ret <8 x i8> %tmp2
}

define <4 x i16> @vqshrns16(<4 x i32>* %A) nounwind {
%tmp1 = load <4 x i32>* %A
; CHECK: vqshrn.s32 d16, q8, #16 @ encoding: [0x30,0x09,0xd0,0xf2]
%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftns.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
ret <4 x i16> %tmp2
}

define <2 x i32> @vqshrns32(<2 x i64>* %A) nounwind {
%tmp1 = load <2 x i64>* %A
; CHECK: vqshrn.s64 d16, q8, #32 @ encoding: [0x30,0x09,0xe0,0xf2]
%tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftns.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
ret <2 x i32> %tmp2
}

define <8 x i8> @vqshrnu8(<8 x i16>* %A) nounwind {
%tmp1 = load <8 x i16>* %A
; CHECK: vqshrn.u16 d16, q8, #8 @ encoding: [0x30,0x09,0xc8,0xf3]
%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftnu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
ret <8 x i8> %tmp2
}

define <4 x i16> @vqshrnu16(<4 x i32>* %A) nounwind {
%tmp1 = load <4 x i32>* %A
; CHECK: vqshrn.u32 d16, q8, #16 @ encoding: [0x30,0x09,0xd0,0xf3]
%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftnu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
ret <4 x i16> %tmp2
}

define <2 x i32> @vqshrnu32(<2 x i64>* %A) nounwind {
%tmp1 = load <2 x i64>* %A
; CHECK: vqshrn.u64 d16, q8, #32 @ encoding: [0x30,0x09,0xe0,0xf3]
%tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftnu.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
ret <2 x i32> %tmp2
}

define <8 x i8> @vqshruns8(<8 x i16>* %A) nounwind {
%tmp1 = load <8 x i16>* %A
; CHECK: vqshrun.s16 d16, q8, #8 @ encoding: [0x30,0x08,0xc8,0xf3]
%tmp2 = call <8 x i8> @llvm.arm.neon.vqshiftnsu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
ret <8 x i8> %tmp2
}

define <4 x i16> @vqshruns16(<4 x i32>* %A) nounwind {
%tmp1 = load <4 x i32>* %A
; CHECK: vqshrun.s32 d16, q8, #16 @ encoding: [0x30,0x08,0xd0,0xf3]
%tmp2 = call <4 x i16> @llvm.arm.neon.vqshiftnsu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
ret <4 x i16> %tmp2
}

define <2 x i32> @vqshruns32(<2 x i64>* %A) nounwind {
%tmp1 = load <2 x i64>* %A
; CHECK: vqshrun.s64 d16, q8, #32 @ encoding: [0x30,0x08,0xe0,0xf3]
%tmp2 = call <2 x i32> @llvm.arm.neon.vqshiftnsu.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
ret <2 x i32> %tmp2
}

declare <8 x i8> @llvm.arm.neon.vqshiftns.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vqshiftns.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vqshiftns.v2i32(<2 x i64>, <2 x i64>) nounwind readnone

declare <8 x i8> @llvm.arm.neon.vqshiftnu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vqshiftnu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vqshiftnu.v2i32(<2 x i64>, <2 x i64>) nounwind readnone

declare <8 x i8> @llvm.arm.neon.vqshiftnsu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vqshiftnsu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vqshiftnsu.v2i32(<2 x i64>, <2 x i64>) nounwind readnone

define <8 x i8> @vqrshrns8(<8 x i16>* %A) nounwind {
%tmp1 = load <8 x i16>* %A
; CHECK: vqrshrn.s16 d16, q8, #8 @ encoding: [0x70,0x09,0xc8,0xf2]
%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftns.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
ret <8 x i8> %tmp2
}

define <4 x i16> @vqrshrns16(<4 x i32>* %A) nounwind {
%tmp1 = load <4 x i32>* %A
; CHECK: vqrshrn.s32 d16, q8, #16 @ encoding: [0x70,0x09,0xd0,0xf2]
%tmp2 = call <4 x i16> @llvm.arm.neon.vqrshiftns.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
ret <4 x i16> %tmp2
}

define <2 x i32> @vqrshrns32(<2 x i64>* %A) nounwind {
%tmp1 = load <2 x i64>* %A
; CHECK: vqrshrn.s64 d16, q8, #32 @ encoding: [0x70,0x09,0xe0,0xf2]
%tmp2 = call <2 x i32> @llvm.arm.neon.vqrshiftns.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
ret <2 x i32> %tmp2
}

define <8 x i8> @vqrshrnu8(<8 x i16>* %A) nounwind {
%tmp1 = load <8 x i16>* %A
; CHECK: vqrshrn.u16 d16, q8, #8 @ encoding: [0x70,0x09,0xc8,0xf3]
%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftnu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
ret <8 x i8> %tmp2
}

define <4 x i16> @vqrshrnu16(<4 x i32>* %A) nounwind {
%tmp1 = load <4 x i32>* %A
; CHECK: vqrshrn.u32 d16, q8, #16 @ encoding: [0x70,0x09,0xd0,0xf3]
%tmp2 = call <4 x i16> @llvm.arm.neon.vqrshiftnu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
ret <4 x i16> %tmp2
}

define <2 x i32> @vqrshrnu32(<2 x i64>* %A) nounwind {
%tmp1 = load <2 x i64>* %A
; CHECK: vqrshrn.u64 d16, q8, #32 @ encoding: [0x70,0x09,0xe0,0xf3]
%tmp2 = call <2 x i32> @llvm.arm.neon.vqrshiftnu.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
ret <2 x i32> %tmp2
}

define <8 x i8> @vqrshruns8(<8 x i16>* %A) nounwind {
%tmp1 = load <8 x i16>* %A
; CHECK: vqrshrun.s16 d16, q8, #8 @ encoding: [0x70,0x08,0xc8,0xf3]
%tmp2 = call <8 x i8> @llvm.arm.neon.vqrshiftnsu.v8i8(<8 x i16> %tmp1, <8 x i16> < i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8, i16 -8 >)
ret <8 x i8> %tmp2
}

define <4 x i16> @vqrshruns16(<4 x i32>* %A) nounwind {
%tmp1 = load <4 x i32>* %A
; CHECK: vqrshrun.s32 d16, q8, #16 @ encoding: [0x70,0x08,0xd0,0xf3]
%tmp2 = call <4 x i16> @llvm.arm.neon.vqrshiftnsu.v4i16(<4 x i32> %tmp1, <4 x i32> < i32 -16, i32 -16, i32 -16, i32 -16 >)
ret <4 x i16> %tmp2
}

define <2 x i32> @vqrshruns32(<2 x i64>* %A) nounwind {
%tmp1 = load <2 x i64>* %A
; CHECK: vqrshrun.s64 d16, q8, #32 @ encoding: [0x70,0x08,0xe0,0xf3]
%tmp2 = call <2 x i32> @llvm.arm.neon.vqrshiftnsu.v2i32(<2 x i64> %tmp1, <2 x i64> < i64 -32, i64 -32 >)
ret <2 x i32> %tmp2
}

declare <8 x i8> @llvm.arm.neon.vqrshiftns.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vqrshiftns.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vqrshiftns.v2i32(<2 x i64>, <2 x i64>) nounwind readnone

declare <8 x i8> @llvm.arm.neon.vqrshiftnu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vqrshiftnu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vqrshiftnu.v2i32(<2 x i64>, <2 x i64>) nounwind readnone

declare <8 x i8> @llvm.arm.neon.vqrshiftnsu.v8i8(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vqrshiftnsu.v4i16(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vqrshiftnsu.v2i32(<2 x i64>, <2 x i64>) nounwind readnone
test/MC/ARM/neon-satshift-encoding.s (+150, -0; file added)
// RUN: llvm-mc -triple arm-unknown-unknown -show-encoding < %s | FileCheck %s

// CHECK: vqshl.s8 d16, d16, d17 @ encoding: [0xb0,0x04,0x41,0xf2]
vqshl.s8 d16, d16, d17
// CHECK: vqshl.s16 d16, d16, d17 @ encoding: [0xb0,0x04,0x51,0xf2]
vqshl.s16 d16, d16, d17
// CHECK: vqshl.s32 d16, d16, d17 @ encoding: [0xb0,0x04,0x61,0xf2]
vqshl.s32 d16, d16, d17
// CHECK: vqshl.s64 d16, d16, d17 @ encoding: [0xb0,0x04,0x71,0xf2]
vqshl.s64 d16, d16, d17
// CHECK: vqshl.u8 d16, d16, d17 @ encoding: [0xb0,0x04,0x41,0xf3]
vqshl.u8 d16, d16, d17
// CHECK: vqshl.u16 d16, d16, d17 @ encoding: [0xb0,0x04,0x51,0xf3]
vqshl.u16 d16, d16, d17
// CHECK: vqshl.u32 d16, d16, d17 @ encoding: [0xb0,0x04,0x61,0xf3]
vqshl.u32 d16, d16, d17
// CHECK: vqshl.u64 d16, d16, d17 @ encoding: [0xb0,0x04,0x71,0xf3]
vqshl.u64 d16, d16, d17
// CHECK: vqshl.s8 q8, q8, q9 @ encoding: [0xf0,0x04,0x42,0xf2]
vqshl.s8 q8, q8, q9
// CHECK: vqshl.s16 q8, q8, q9 @ encoding: [0xf0,0x04,0x52,0xf2]
vqshl.s16 q8, q8, q9
// CHECK: vqshl.s32 q8, q8, q9 @ encoding: [0xf0,0x04,0x62,0xf2]
vqshl.s32 q8, q8, q9
// CHECK: vqshl.s64 q8, q8, q9 @ encoding: [0xf0,0x04,0x72,0xf2]
vqshl.s64 q8, q8, q9
// CHECK: vqshl.u8 q8, q8, q9 @ encoding: [0xf0,0x04,0x42,0xf3]
vqshl.u8 q8, q8, q9
// CHECK: vqshl.u16 q8, q8, q9 @ encoding: [0xf0,0x04,0x52,0xf3]
vqshl.u16 q8, q8, q9
// CHECK: vqshl.u32 q8, q8, q9 @ encoding: [0xf0,0x04,0x62,0xf3]
vqshl.u32 q8, q8, q9
// CHECK: vqshl.u64 q8, q8, q9 @ encoding: [0xf0,0x04,0x72,0xf3]
vqshl.u64 q8, q8, q9
// CHECK: vqshl.s8 d16, d16, #7 @ encoding: [0x30,0x07,0xcf,0xf2]
vqshl.s8 d16, d16, #7
// CHECK: vqshl.s16 d16, d16, #15 @ encoding: [0x30,0x07,0xdf,0xf2]
vqshl.s16 d16, d16, #15
// CHECK: vqshl.s32 d16, d16, #31 @ encoding: [0x30,0x07,0xff,0xf2]
vqshl.s32 d16, d16, #31
// CHECK: vqshl.s64 d16, d16, #63 @ encoding: [0xb0,0x07,0xff,0xf2]
vqshl.s64 d16, d16, #63
// CHECK: vqshl.u8 d16, d16, #7 @ encoding: [0x30,0x07,0xcf,0xf3]
vqshl.u8 d16, d16, #7
// CHECK: vqshl.u16 d16, d16, #15 @ encoding: [0x30,0x07,0xdf,0xf3]
vqshl.u16 d16, d16, #15
// CHECK: vqshl.u32 d16, d16, #31 @ encoding: [0x30,0x07,0xff,0xf3]
vqshl.u32 d16, d16, #31
// CHECK: vqshl.u64 d16, d16, #63 @ encoding: [0xb0,0x07,0xff,0xf3]
vqshl.u64 d16, d16, #63
// CHECK: vqshlu.s8 d16, d16, #7 @ encoding: [0x30,0x06,0xcf,0xf3]
vqshlu.s8 d16, d16, #7
// CHECK: vqshlu.s16 d16, d16, #15 @ encoding: [0x30,0x06,0xdf,0xf3]
vqshlu.s16 d16, d16, #15
// CHECK: vqshlu.s32 d16, d16, #31 @ encoding: [0x30,0x06,0xff,0xf3]
vqshlu.s32 d16, d16, #31
// CHECK: vqshlu.s64 d16, d16, #63 @ encoding: [0xb0,0x06,0xff,0xf3]
vqshlu.s64 d16, d16, #63
// CHECK: vqshl.s8 q8, q8, #7 @ encoding: [0x70,0x07,0xcf,0xf2]
vqshl.s8 q8, q8, #7
// CHECK: vqshl.s16 q8, q8, #15 @ encoding: [0x70,0x07,0xdf,0xf2]
vqshl.s16 q8, q8, #15
// CHECK: vqshl.s32 q8, q8, #31 @ encoding: [0x70,0x07,0xff,0xf2]
vqshl.s32 q8, q8, #31
// CHECK: vqshl.s64 q8, q8, #63 @ encoding: [0xf0,0x07,0xff,0xf2]
vqshl.s64 q8, q8, #63
// CHECK: vqshl.u8 q8, q8, #7 @ encoding: [0x70,0x07,0xcf,0xf3]
vqshl.u8 q8, q8, #7
// CHECK: vqshl.u16 q8, q8, #15 @ encoding: [0x70,0x07,0xdf,0xf3]
vqshl.u16 q8, q8, #15
// CHECK: vqshl.u32 q8, q8, #31 @ encoding: [0x70,0x07,0xff,0xf3]
vqshl.u32 q8, q8, #31
// CHECK: vqshl.u64 q8, q8, #63 @ encoding: [0xf0,0x07,0xff,0xf3]
vqshl.u64 q8, q8, #63
// CHECK: vqshlu.s8 q8, q8, #7 @ encoding: [0x70,0x06,0xcf,0xf3]
vqshlu.s8 q8, q8, #7
// CHECK: vqshlu.s16 q8, q8, #15 @ encoding: [0x70,0x06,0xdf,0xf3]
vqshlu.s16 q8, q8, #15
// CHECK: vqshlu.s32 q8, q8, #31 @ encoding: [0x70,0x06,0xff,0xf3]
vqshlu.s32 q8, q8, #31
// CHECK: vqshlu.s64 q8, q8, #63 @ encoding: [0xf0,0x06,0xff,0xf3]
vqshlu.s64 q8, q8, #63
// CHECK: vqrshl.s8 d16, d16, d17 @ encoding: [0xb0,0x05,0x41,0xf2]
vqrshl.s8 d16, d16, d17
// CHECK: vqrshl.s16 d16, d16, d17 @ encoding: [0xb0,0x05,0x51,0xf2]
vqrshl.s16 d16, d16, d17
// CHECK: vqrshl.s32 d16, d16, d17 @ encoding: [0xb0,0x05,0x61,0xf2]
vqrshl.s32 d16, d16, d17
// CHECK: vqrshl.s64 d16, d16, d17 @ encoding: [0xb0,0x05,0x71,0xf2]
vqrshl.s64 d16, d16, d17
// CHECK: vqrshl.u8 d16, d16, d17 @ encoding: [0xb0,0x05,0x41,0xf3]
vqrshl.u8 d16, d16, d17
// CHECK: vqrshl.u16 d16, d16, d17 @ encoding: [0xb0,0x05,0x51,0xf3]
vqrshl.u16 d16, d16, d17
// CHECK: vqrshl.u32 d16, d16, d17 @ encoding: [0xb0,0x05,0x61,0xf3]
vqrshl.u32 d16, d16, d17
// CHECK: vqrshl.u64 d16, d16, d17 @ encoding: [0xb0,0x05,0x71,0xf3]
vqrshl.u64 d16, d16, d17
// CHECK: vqrshl.s8 q8, q8, q9 @ encoding: [0xf0,0x05,0x42,0xf2]
vqrshl.s8 q8, q8, q9
// CHECK: vqrshl.s16 q8, q8, q9 @ encoding: [0xf0,0x05,0x52,0xf2]
vqrshl.s16 q8, q8, q9
// CHECK: vqrshl.s32 q8, q8, q9 @ encoding: [0xf0,0x05,0x62,0xf2]
vqrshl.s32 q8, q8, q9
// CHECK: vqrshl.s64 q8, q8, q9 @ encoding: [0xf0,0x05,0x72,0xf2]
vqrshl.s64 q8, q8, q9
// CHECK: vqrshl.u8 q8, q8, q9 @ encoding: [0xf0,0x05,0x42,0xf3]
vqrshl.u8 q8, q8, q9
// CHECK: vqrshl.u16 q8, q8, q9 @ encoding: [0xf0,0x05,0x52,0xf3]
vqrshl.u16 q8, q8, q9
// CHECK: vqrshl.u32 q8, q8, q9 @ encoding: [0xf0,0x05,0x62,0xf3]
vqrshl.u32 q8, q8, q9
// CHECK: vqrshl.u64 q8, q8, q9 @ encoding: [0xf0,0x05,0x72,0xf3]
vqrshl.u64 q8, q8, q9
// CHECK: vqshrn.s16 d16, q8, #8 @ encoding: [0x30,0x09,0xc8,0xf2]
vqshrn.s16 d16, q8, #8
// CHECK: vqshrn.s32 d16, q8, #16 @ encoding: [0x30,0x09,0xd0,0xf2]
vqshrn.s32 d16, q8, #16
// CHECK: vqshrn.s64 d16, q8, #32 @ encoding: [0x30,0x09,0xe0,0xf2]
vqshrn.s64 d16, q8, #32
// CHECK: vqshrn.u16 d16, q8, #8 @ encoding: [0x30,0x09,0xc8,0xf3]
vqshrn.u16 d16, q8, #8
// CHECK: vqshrn.u32 d16, q8, #16 @ encoding: [0x30,0x09,0xd0,0xf3]
vqshrn.u32 d16, q8, #16
// CHECK: vqshrn.u64 d16, q8, #32 @ encoding: [0x30,0x09,0xe0,0xf3]
vqshrn.u64 d16, q8, #32
// CHECK: vqshrun.s16 d16, q8, #8 @ encoding: [0x30,0x08,0xc8,0xf3]
vqshrun.s16 d16, q8, #8
// CHECK: vqshrun.s32 d16, q8, #16 @ encoding: [0x30,0x08,0xd0,0xf3]
vqshrun.s32 d16, q8, #16
// CHECK: vqshrun.s64 d16, q8, #32 @ encoding: [0x30,0x08,0xe0,0xf3]
vqshrun.s64 d16, q8, #32
// CHECK: vqrshrn.s16 d16, q8, #8 @ encoding: [0x70,0x09,0xc8,0xf2]
vqrshrn.s16 d16, q8, #8
// CHECK: vqrshrn.s32 d16, q8, #16 @ encoding: [0x70,0x09,0xd0,0xf2]
vqrshrn.s32 d16, q8, #16
// CHECK: vqrshrn.s64 d16, q8, #32 @ encoding: [0x70,0x09,0xe0,0xf2]
vqrshrn.s64 d16, q8, #32
// CHECK: vqrshrn.u16 d16, q8, #8 @ encoding: [0x70,0x09,0xc8,0xf3]
vqrshrn.u16 d16, q8, #8
// CHECK: vqrshrn.u32 d16, q8, #16 @ encoding: [0x70,0x09,0xd0,0xf3]
vqrshrn.u32 d16, q8, #16
// CHECK: vqrshrn.u64 d16, q8, #32 @ encoding: [0x70,0x09,0xe0,0xf3]
vqrshrn.u64 d16, q8, #32
// CHECK: vqrshrun.s16 d16, q8, #8 @ encoding: [0x70,0x08,0xc8,0xf3]
vqrshrun.s16 d16, q8, #8
// CHECK: vqrshrun.s32 d16, q8, #16 @ encoding: [0x70,0x08,0xd0,0xf3]
vqrshrun.s32 d16, q8, #16
// CHECK: vqrshrun.s64 d16, q8, #32 @ encoding: [0x70,0x08,0xe0,0xf3]
vqrshrun.s64 d16, q8, #32
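
Any single encoding above can be spot-checked by hand (a sketch, assuming a built llvm-mc is on PATH; the flags mirror the RUN line at the top of the file):

$ echo 'vqshl.s8 d16, d16, d17' | llvm-mc -triple arm-unknown-unknown -show-encoding
        vqshl.s8 d16, d16, d17 @ encoding: [0xb0,0x04,0x41,0xf2]

FileCheck only requires each CHECK pattern to appear in order in that output, so surrounding assembler output (such as a leading .text directive) does not affect the match.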