; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=arm-eabi -mattr=+neon %s -o - -lower-interleaved-accesses=false | FileCheck %s

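; These tests check that shufflevector operations whose masks select a
; contiguous run of elements from the concatenation of the two inputs are
; lowered to the NEON VEXT instruction, which extracts such a run starting
; at the immediate element offset.
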
define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: test_vextd:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d16, [r1]
; CHECK-NEXT:    vldr d17, [r0]
; CHECK-NEXT:    vext.8 d16, d17, d16, #3
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    mov pc, lr
  %tmp1 = load <8 x i8>, <8 x i8>* %A
  %tmp2 = load <8 x i8>, <8 x i8>* %B
  %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
  ret <8 x i8> %tmp3
}

define <8 x i8> @test_vextRd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: test_vextRd:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d16, [r0]
; CHECK-NEXT:    vldr d17, [r1]
; CHECK-NEXT:    vext.8 d16, d17, d16, #5
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    mov pc, lr
  %tmp1 = load <8 x i8>, <8 x i8>* %A
  %tmp2 = load <8 x i8>, <8 x i8>* %B
  %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4>
  ret <8 x i8> %tmp3
}

define <16 x i8> @test_vextq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-LABEL: test_vextq:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vld1.64 {d16, d17}, [r1]
; CHECK-NEXT:    vld1.64 {d18, d19}, [r0]
; CHECK-NEXT:    vext.8 q8, q9, q8, #3
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
  %tmp1 = load <16 x i8>, <16 x i8>* %A
  %tmp2 = load <16 x i8>, <16 x i8>* %B
  %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18>
  ret <16 x i8> %tmp3
}

define <16 x i8> @test_vextRq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-LABEL: test_vextRq:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
; CHECK-NEXT:    vld1.64 {d18, d19}, [r1]
; CHECK-NEXT:    vext.8 q8, q9, q8, #7
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
  %tmp1 = load <16 x i8>, <16 x i8>* %A
  %tmp2 = load <16 x i8>, <16 x i8>* %B
  %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6>
  ret <16 x i8> %tmp3
}

define <4 x i16> @test_vextd16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
; CHECK-LABEL: test_vextd16:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d16, [r1]
; CHECK-NEXT:    vldr d17, [r0]
; CHECK-NEXT:    vext.16 d16, d17, d16, #3
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    mov pc, lr
  %tmp1 = load <4 x i16>, <4 x i16>* %A
  %tmp2 = load <4 x i16>, <4 x i16>* %B
  %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
  ret <4 x i16> %tmp3
}

define <4 x i32> @test_vextq32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
; CHECK-LABEL: test_vextq32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vld1.64 {d16, d17}, [r1]
; CHECK-NEXT:    vld1.64 {d18, d19}, [r0]
; CHECK-NEXT:    vext.32 q8, q9, q8, #3
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
  %tmp1 = load <4 x i32>, <4 x i32>* %A
  %tmp2 = load <4 x i32>, <4 x i32>* %B
  %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
  ret <4 x i32> %tmp3
}

; Undef shuffle indices should not prevent matching to VEXT:

define <8 x i8> @test_vextd_undef(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK-LABEL: test_vextd_undef:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d16, [r1]
; CHECK-NEXT:    vldr d17, [r0]
; CHECK-NEXT:    vext.8 d16, d17, d16, #3
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    mov pc, lr
  %tmp1 = load <8 x i8>, <8 x i8>* %A
  %tmp2 = load <8 x i8>, <8 x i8>* %B
  %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 3, i32 undef, i32 undef, i32 6, i32 7, i32 8, i32 9, i32 10>
  ret <8 x i8> %tmp3
}

define <16 x i8> @test_vextRq_undef(<16 x i8>* %A, <16 x i8>* %B) nounwind {
; CHECK-LABEL: test_vextRq_undef:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
; CHECK-NEXT:    vld1.64 {d18, d19}, [r1]
; CHECK-NEXT:    vext.8 q8, q9, q8, #7
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
  %tmp1 = load <16 x i8>, <16 x i8>* %A
  %tmp2 = load <16 x i8>, <16 x i8>* %B
  %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 23, i32 24, i32 25, i32 26, i32 undef, i32 undef, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 undef, i32 6>
  ret <16 x i8> %tmp3
}

define <16 x i8> @test_vextq_undef_op2(<16 x i8> %a) nounwind {
; CHECK-LABEL: test_vextq_undef_op2:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmov d17, r2, r3
; CHECK-NEXT:    vmov d16, r0, r1
; CHECK-NEXT:    vext.8 q8, q8, q8, #2
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
entry:
  %tmp1 = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1>
  ret <16 x i8> %tmp1
}

define <8 x i8> @test_vextd_undef_op2(<8 x i8> %a) nounwind {
; CHECK-LABEL: test_vextd_undef_op2:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmov d16, r0, r1
; CHECK-NEXT:    vext.8 d16, d16, d16, #2
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    mov pc, lr
entry:
  %tmp1 = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1>
  ret <8 x i8> %tmp1
}

define <16 x i8> @test_vextq_undef_op2_undef(<16 x i8> %a) nounwind {
; CHECK-LABEL: test_vextq_undef_op2_undef:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmov d17, r2, r3
; CHECK-NEXT:    vmov d16, r0, r1
; CHECK-NEXT:    vext.8 q8, q8, q8, #2
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
entry:
  %tmp1 = shufflevector <16 x i8> %a, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 4, i32 undef, i32 undef, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 0, i32 1>
  ret <16 x i8> %tmp1
}

define <8 x i8> @test_vextd_undef_op2_undef(<8 x i8> %a) nounwind {
; CHECK-LABEL: test_vextd_undef_op2_undef:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmov d16, r0, r1
; CHECK-NEXT:    vext.8 d16, d16, d16, #2
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    mov pc, lr
entry:
  %tmp1 = shufflevector <8 x i8> %a, <8 x i8> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 6, i32 7, i32 undef, i32 1>
  ret <8 x i8> %tmp1
}

; Tests for the ReconstructShuffle function. Indices have to be carefully
; chosen to reach the lowering phase as a BUILD_VECTOR.

; One vector needs vext, the other can be handled by extract_subvector.
; Also checks that interleaving of the sources is handled correctly.
; Essence: a vext is used on %A, and something saner than a stack
; load/store is used for the final result.
define <4 x i16> @test_interleaved(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: test_interleaved:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
; CHECK-NEXT:    vext.16 d16, d16, d17, #3
; CHECK-NEXT:    vorr d17, d16, d16
; CHECK-NEXT:    vld1.64 {d18, d19}, [r1]
; CHECK-NEXT:    vuzp.16 d16, d17
; CHECK-NEXT:    vzip.16 d16, d18
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    mov pc, lr
  %tmp1 = load <8 x i16>, <8 x i16>* %A
  %tmp2 = load <8 x i16>, <8 x i16>* %B
  %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <4 x i32> <i32 3, i32 8, i32 5, i32 9>
  ret <4 x i16> %tmp3
}

; An undef in the shuffle list should still be optimizable.
define <4 x i16> @test_undef(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: test_undef:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vldr d16, [r1]
; CHECK-NEXT:    vldr d17, [r0, #8]
; CHECK-NEXT:    vzip.16 d17, d16
; CHECK-NEXT:    vmov r0, r1, d17
; CHECK-NEXT:    mov pc, lr
  %tmp1 = load <8 x i16>, <8 x i16>* %A
  %tmp2 = load <8 x i16>, <8 x i16>* %B
  %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <4 x i32> <i32 undef, i32 8, i32 5, i32 9>
  ret <4 x i16> %tmp3
}

; We should ignore a build_vector with more than two sources.
; Use the illegal <32 x i16> type to produce such a shuffle after legalizing types.
; Check for fallback to by-element inserts.
define <4 x i16> @test_multisource(<32 x i16>* %B) nounwind {
; CHECK-LABEL: test_multisource:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    mov r1, r0
; CHECK-NEXT:    add r2, r0, #48
; CHECK-NEXT:    add r0, r0, #32
; CHECK-NEXT:    vld1.16 {d16, d17}, [r1:128]!
; CHECK-NEXT:    vld1.64 {d20, d21}, [r0:128]
; CHECK-NEXT:    vorr d24, d20, d20
; CHECK-NEXT:    vld1.64 {d18, d19}, [r2:128]
; CHECK-NEXT:    vld1.64 {d22, d23}, [r1:128]
; CHECK-NEXT:    vzip.16 d24, d18
; CHECK-NEXT:    vtrn.16 q8, q11
; CHECK-NEXT:    vext.16 d18, d20, d24, #2
; CHECK-NEXT:    vext.16 d16, d18, d16, #2
; CHECK-NEXT:    vext.16 d16, d16, d16, #2
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    mov pc, lr
  %tmp1 = load <32 x i16>, <32 x i16>* %B
  %tmp2 = shufflevector <32 x i16> %tmp1, <32 x i16> undef, <4 x i32> <i32 0, i32 8, i32 16, i32 24>
  ret <4 x i16> %tmp2
}

; We don't handle shuffles using more than half of a 128-bit vector.
; Again, test for fallback to by-element inserts.
define <4 x i16> @test_largespan(<8 x i16>* %B) nounwind {
; CHECK-LABEL: test_largespan:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
; CHECK-NEXT:    vorr d18, d16, d16
; CHECK-NEXT:    vuzp.16 d18, d17
; CHECK-NEXT:    vmov r0, r1, d18
; CHECK-NEXT:    mov pc, lr
  %tmp1 = load <8 x i16>, <8 x i16>* %B
  %tmp2 = shufflevector <8 x i16> %tmp1, <8 x i16> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  ret <4 x i16> %tmp2
}

; The actual shuffle code only handles some cases; make sure we check
; this rather than blindly emitting a VECTOR_SHUFFLE (an infinite
; lowering loop can result otherwise).
; (There are probably better ways to lower this shuffle, but it's not
; important.)
define <8 x i16> @test_illegal(<8 x i16>* %A, <8 x i16>* %B) nounwind {
; CHECK-LABEL: test_illegal:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vld1.64 {d16, d17}, [r0]
; CHECK-NEXT:    vorr d22, d16, d16
; CHECK-NEXT:    vmov.u16 r0, d16[0]
; CHECK-NEXT:    vorr d23, d16, d16
; CHECK-NEXT:    vmov.u16 r2, d17[3]
; CHECK-NEXT:    vmov.u16 r3, d17[1]
; CHECK-NEXT:    vld1.64 {d18, d19}, [r1]
; CHECK-NEXT:    vmov.u16 r1, d19[1]
; CHECK-NEXT:    vuzp.16 d22, d23
; CHECK-NEXT:    vuzp.16 d22, d18
; CHECK-NEXT:    vmov.16 d20[0], r0
; CHECK-NEXT:    vmov.16 d20[1], r2
; CHECK-NEXT:    vmov.16 d20[2], r3
; CHECK-NEXT:    vmov.16 d20[3], r1
; CHECK-NEXT:    vext.16 d21, d16, d18, #3
; CHECK-NEXT:    vmov r0, r1, d20
; CHECK-NEXT:    vmov r2, r3, d21
; CHECK-NEXT:    mov pc, lr
  %tmp1 = load <8 x i16>, <8 x i16>* %A
  %tmp2 = load <8 x i16>, <8 x i16>* %B
  %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 7, i32 5, i32 13, i32 3, i32 2, i32 2, i32 9>
  ret <8 x i16> %tmp3
}

; Make sure this doesn't crash.
define arm_aapcscc void @test_elem_mismatch(<2 x i64>* nocapture %src, <4 x i16>* nocapture %dest) nounwind {
; CHECK-LABEL: test_elem_mismatch:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vld1.64 {d16, d17}, [r0:128]
; CHECK-NEXT:    vmov.32 r0, d16[0]
; CHECK-NEXT:    vmov.32 r2, d17[0]
; CHECK-NEXT:    vmov.16 d16[0], r0
; CHECK-NEXT:    vmov.16 d16[1], r2
; CHECK-NEXT:    vstr d16, [r1]
; CHECK-NEXT:    mov pc, lr
  %tmp0 = load <2 x i64>, <2 x i64>* %src, align 16
  %tmp1 = bitcast <2 x i64> %tmp0 to <4 x i32>
  %tmp2 = extractelement <4 x i32> %tmp1, i32 0
  %tmp3 = extractelement <4 x i32> %tmp1, i32 2
  %tmp4 = trunc i32 %tmp2 to i16
  %tmp5 = trunc i32 %tmp3 to i16
  %tmp6 = insertelement <4 x i16> undef, i16 %tmp4, i32 0
  %tmp7 = insertelement <4 x i16> %tmp6, i16 %tmp5, i32 1
  store <4 x i16> %tmp7, <4 x i16>* %dest, align 4
  ret void
}

define <4 x i32> @test_reverse_and_extract(<2 x i32>* %A) {
; CHECK-LABEL: test_reverse_and_extract:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vldr d16, [r0]
; CHECK-NEXT:    vrev64.32 q9, q8
; CHECK-NEXT:    vext.32 q8, q8, q9, #2
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
entry:
  %tmp1 = load <2 x i32>, <2 x i32>* %A
  %0 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <4 x i32> <i32 undef, i32 undef, i32 1, i32 0>
  ret <4 x i32> %0
}

define <4 x i32> @test_dup_and_extract(<2 x i32>* %A) {
; CHECK-LABEL: test_dup_and_extract:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vldr d16, [r0]
; CHECK-NEXT:    vdup.32 q9, d16[0]
; CHECK-NEXT:    vext.32 q8, q9, q8, #2
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
entry:
  %tmp1 = load <2 x i32>, <2 x i32>* %A
  %0 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <4 x i32> <i32 0, i32 0, i32 0, i32 1>
  ret <4 x i32> %0
}

define <4 x i32> @test_zip_and_extract(<2 x i32>* %A) {
; CHECK-LABEL: test_zip_and_extract:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vldr d16, [r0]
; CHECK-NEXT:    vorr q9, q8, q8
; CHECK-NEXT:    vorr q10, q8, q8
; CHECK-NEXT:    vzip.32 q9, q10
; CHECK-NEXT:    vext.32 q8, q9, q8, #2
; CHECK-NEXT:    vmov r0, r1, d16
; CHECK-NEXT:    vmov r2, r3, d17
; CHECK-NEXT:    mov pc, lr
entry:
  %tmp1 = load <2 x i32>, <2 x i32>* %A
  %0 = shufflevector <2 x i32> %tmp1, <2 x i32> undef, <4 x i32> <i32 1, i32 1, i32 0, i32 1>
  ret <4 x i32> %0
}