; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve %s -o - | FileCheck %s
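
; Lowering of @llvm.get.active.lane.mask followed by a select, for MVE-legal
; vector types and for illegal ones (v2i64, v7i32) that must be expanded or
; widened.

; v2i64: MVE has no 64-bit vector compare, so the <2 x i1> mask is expanded
; to scalar 64-bit add/compare sequences whose results are packed into p0
; with bfi/vmsr before the final vpsel.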
define <2 x i64> @v2i64(i32 %index, i32 %TC, <2 x i64> %V1, <2 x i64> %V2) {
; CHECK-LABEL: v2i64:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    push {r4, r5, r6, lr}
; CHECK-NEXT:    vmov q1[2], q1[0], r0, r0
; CHECK-NEXT:    vmov.i64 q0, #0xffffffff
; CHECK-NEXT:    vand q1, q1, q0
; CHECK-NEXT:    movs r5, #0
; CHECK-NEXT:    vmov r0, r4, d3
; CHECK-NEXT:    vmov q2[2], q2[0], r1, r1
; CHECK-NEXT:    vmov lr, r12, d2
; CHECK-NEXT:    adds r6, r0, #1
; CHECK-NEXT:    adc r4, r4, #0
; CHECK-NEXT:    subs.w r0, lr, #-1
; CHECK-NEXT:    vmov q1[2], q1[0], lr, r6
; CHECK-NEXT:    sbcs r0, r12, #0
; CHECK-NEXT:    vmov q1[3], q1[1], r12, r4
; CHECK-NEXT:    csetm r12, lo
; CHECK-NEXT:    subs.w r6, r6, #-1
; CHECK-NEXT:    bfi r5, r12, #0, #8
; CHECK-NEXT:    sbcs r6, r4, #0
; CHECK-NEXT:    mov.w r0, #0
; CHECK-NEXT:    csetm r6, lo
; CHECK-NEXT:    bfi r5, r6, #8, #8
; CHECK-NEXT:    vmsr p0, r5
; CHECK-NEXT:    vpsel q1, q1, q0
; CHECK-NEXT:    vand q0, q2, q0
; CHECK-NEXT:    vmov r1, r4, d0
; CHECK-NEXT:    vmov r6, r5, d2
; CHECK-NEXT:    vmov d0, r2, r3
; CHECK-NEXT:    subs r1, r6, r1
; CHECK-NEXT:    sbcs.w r1, r5, r4
; CHECK-NEXT:    vmov r5, r4, d1
; CHECK-NEXT:    csetm r1, lo
; CHECK-NEXT:    vldr d1, [sp, #16]
; CHECK-NEXT:    bfi r0, r1, #0, #8
; CHECK-NEXT:    vmov r1, r6, d3
; CHECK-NEXT:    subs r1, r1, r5
; CHECK-NEXT:    sbcs.w r1, r6, r4
; CHECK-NEXT:    csetm r1, lo
; CHECK-NEXT:    bfi r0, r1, #8, #8
; CHECK-NEXT:    vmsr p0, r0
; CHECK-NEXT:    add r0, sp, #24
; CHECK-NEXT:    vldrw.u32 q1, [r0]
; CHECK-NEXT:    vpsel q0, q0, q1
; CHECK-NEXT:    vmov r0, r1, d0
; CHECK-NEXT:    vmov r2, r3, d1
; CHECK-NEXT:    pop {r4, r5, r6, pc}
  %active.lane.mask = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i32(i32 %index, i32 %TC)
  %select = select <2 x i1> %active.lane.mask, <2 x i64> %V1, <2 x i64> %V2
  ret <2 x i64> %select
}
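
; v4i32: the legal case. A single vector compare forms the mask: lane
; indices {0,1,2,3} are loaded from the constant pool, saturating-added to
; %index (vqadd.u32), and compared against the splatted trip count with
; vcmp.u32 hi.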
define <4 x i32> @v4i32(i32 %index, i32 %TC, <4 x i32> %V1, <4 x i32> %V2) {
; CHECK-LABEL: v4i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    adr.w r12, .LCPI1_0
; CHECK-NEXT:    vdup.32 q1, r1
; CHECK-NEXT:    vldrw.u32 q0, [r12]
; CHECK-NEXT:    vqadd.u32 q0, q0, r0
; CHECK-NEXT:    add r0, sp, #8
; CHECK-NEXT:    vcmp.u32 hi, q1, q0
; CHECK-NEXT:    vldr d1, [sp]
; CHECK-NEXT:    vldrw.u32 q1, [r0]
; CHECK-NEXT:    vmov d0, r2, r3
; CHECK-NEXT:    vpsel q0, q0, q1
; CHECK-NEXT:    vmov r0, r1, d0
; CHECK-NEXT:    vmov r2, r3, d1
; CHECK-NEXT:    bx lr
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI1_0:
; CHECK-NEXT:    .long 0 @ 0x0
; CHECK-NEXT:    .long 1 @ 0x1
; CHECK-NEXT:    .long 2 @ 0x2
; CHECK-NEXT:    .long 3 @ 0x3
  %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %TC)
  %select = select <4 x i1> %active.lane.mask, <4 x i32> %V1, <4 x i32> %V2
  ret <4 x i32> %select
}
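
; v7i32: an illegal lane count, returned indirectly through r0. The mask is
; computed as two v4i32 compares, using lane indices {0,1,2,3} and
; {4,5,6,<pad>} from the constant pool (note the trailing .zero 4).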
define <7 x i32> @v7i32(i32 %index, i32 %TC, <7 x i32> %V1, <7 x i32> %V2) {
; CHECK-LABEL: v7i32:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    ldr.w r12, [sp, #40]
; CHECK-NEXT:    vdup.32 q3, r2
; CHECK-NEXT:    ldr r3, [sp, #32]
; CHECK-NEXT:    adr r2, .LCPI2_1
; CHECK-NEXT:    vmov q0[2], q0[0], r3, r12
; CHECK-NEXT:    ldr.w r12, [sp, #44]
; CHECK-NEXT:    ldr r3, [sp, #36]
; CHECK-NEXT:    vmov q0[3], q0[1], r3, r12
; CHECK-NEXT:    ldr.w r12, [sp, #8]
; CHECK-NEXT:    ldr r3, [sp]
; CHECK-NEXT:    vmov q1[2], q1[0], r3, r12
; CHECK-NEXT:    ldr.w r12, [sp, #12]
; CHECK-NEXT:    ldr r3, [sp, #4]
; CHECK-NEXT:    vmov q1[3], q1[1], r3, r12
; CHECK-NEXT:    adr r3, .LCPI2_0
; CHECK-NEXT:    vldrw.u32 q2, [r3]
; CHECK-NEXT:    vqadd.u32 q2, q2, r1
; CHECK-NEXT:    vcmp.u32 hi, q3, q2
; CHECK-NEXT:    vpsel q0, q1, q0
; CHECK-NEXT:    vstrw.32 q0, [r0]
; CHECK-NEXT:    vldrw.u32 q0, [r2]
; CHECK-NEXT:    ldr r2, [sp, #48]
; CHECK-NEXT:    vqadd.u32 q0, q0, r1
; CHECK-NEXT:    ldr r1, [sp, #52]
; CHECK-NEXT:    vcmp.u32 hi, q3, q0
; CHECK-NEXT:    vmov.32 q0[1], r1
; CHECK-NEXT:    ldr r1, [sp, #56]
; CHECK-NEXT:    vmov q0[2], q0[0], r2, r1
; CHECK-NEXT:    ldr r1, [sp, #20]
; CHECK-NEXT:    ldr r2, [sp, #16]
; CHECK-NEXT:    vmov.32 q1[1], r1
; CHECK-NEXT:    ldr r1, [sp, #24]
; CHECK-NEXT:    vmov q1[2], q1[0], r2, r1
; CHECK-NEXT:    vpsel q0, q1, q0
; CHECK-NEXT:    vmov r1, s2
; CHECK-NEXT:    vmov.f32 s2, s1
; CHECK-NEXT:    vmov r3, s0
; CHECK-NEXT:    vmov r2, s2
; CHECK-NEXT:    strd r3, r2, [r0, #16]
; CHECK-NEXT:    str r1, [r0, #24]
; CHECK-NEXT:    bx lr
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI2_0:
; CHECK-NEXT:    .long 0 @ 0x0
; CHECK-NEXT:    .long 1 @ 0x1
; CHECK-NEXT:    .long 2 @ 0x2
; CHECK-NEXT:    .long 3 @ 0x3
; CHECK-NEXT:  .LCPI2_1:
; CHECK-NEXT:    .long 4 @ 0x4
; CHECK-NEXT:    .long 5 @ 0x5
; CHECK-NEXT:    .long 6 @ 0x6
; CHECK-NEXT:    .zero 4
  %active.lane.mask = call <7 x i1> @llvm.get.active.lane.mask.v7i1.i32(i32 %index, i32 %TC)
  %select = select <7 x i1> %active.lane.mask, <7 x i32> %V1, <7 x i32> %V2
  ret <7 x i32> %select
}
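
; v8i16: vqadd/vcmp work on 32-bit lanes here, so the mask is built from two
; v4i32 compares, narrowed to 16-bit through the stack (vstrh.32) and
; re-tested with vcmp.i16 ne to form the final v8i1 predicate.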
define <8 x i16> @v8i16(i32 %index, i32 %TC, <8 x i16> %V1, <8 x i16> %V2) {
; CHECK-LABEL: v8i16:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    push {r4, lr}
; CHECK-NEXT:    sub sp, #16
; CHECK-NEXT:    adr.w r12, .LCPI3_0
; CHECK-NEXT:    vdup.32 q1, r1
; CHECK-NEXT:    vldrw.u32 q0, [r12]
; CHECK-NEXT:    vmov.i8 q2, #0xff
; CHECK-NEXT:    mov r4, sp
; CHECK-NEXT:    adr r1, .LCPI3_1
; CHECK-NEXT:    vqadd.u32 q0, q0, r0
; CHECK-NEXT:    vcmp.u32 hi, q1, q0
; CHECK-NEXT:    vmov.i8 q0, #0x0
; CHECK-NEXT:    vpsel q3, q2, q0
; CHECK-NEXT:    vstrh.32 q3, [r4, #8]
; CHECK-NEXT:    vldrw.u32 q3, [r1]
; CHECK-NEXT:    vqadd.u32 q3, q3, r0
; CHECK-NEXT:    add r0, sp, #32
; CHECK-NEXT:    vcmp.u32 hi, q1, q3
; CHECK-NEXT:    vldrw.u32 q1, [r0]
; CHECK-NEXT:    vpsel q0, q2, q0
; CHECK-NEXT:    vstrh.32 q0, [r4]
; CHECK-NEXT:    vldr d1, [sp, #24]
; CHECK-NEXT:    vldrw.u32 q2, [r4]
; CHECK-NEXT:    vmov d0, r2, r3
; CHECK-NEXT:    vcmp.i16 ne, q2, zr
; CHECK-NEXT:    vpsel q0, q0, q1
; CHECK-NEXT:    vmov r0, r1, d0
; CHECK-NEXT:    vmov r2, r3, d1
; CHECK-NEXT:    add sp, #16
; CHECK-NEXT:    pop {r4, pc}
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI3_0:
; CHECK-NEXT:    .long 4 @ 0x4
; CHECK-NEXT:    .long 5 @ 0x5
; CHECK-NEXT:    .long 6 @ 0x6
; CHECK-NEXT:    .long 7 @ 0x7
; CHECK-NEXT:  .LCPI3_1:
; CHECK-NEXT:    .long 0 @ 0x0
; CHECK-NEXT:    .long 1 @ 0x1
; CHECK-NEXT:    .long 2 @ 0x2
; CHECK-NEXT:    .long 3 @ 0x3
  %active.lane.mask = call <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32 %index, i32 %TC)
  %select = select <8 x i1> %active.lane.mask, <8 x i16> %V1, <8 x i16> %V2
  ret <8 x i16> %select
}
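
; v16i8: the same scheme one level deeper: four v4i32 compares are narrowed
; to i16 via the stack, re-tested as i16, narrowed again to i8 (vstrb.16),
; and re-tested with vcmp.i8 ne to drive the final vpsel.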
define <16 x i8> @v16i8(i32 %index, i32 %TC, <16 x i8> %V1, <16 x i8> %V2) {
; CHECK-LABEL: v16i8:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    push {r4, r5, r7, lr}
; CHECK-NEXT:    sub sp, #48
; CHECK-NEXT:    adr.w r12, .LCPI4_0
; CHECK-NEXT:    vdup.32 q2, r1
; CHECK-NEXT:    vldrw.u32 q0, [r12]
; CHECK-NEXT:    vmov.i8 q1, #0xff
; CHECK-NEXT:    add r5, sp, #16
; CHECK-NEXT:    adr r1, .LCPI4_1
; CHECK-NEXT:    vqadd.u32 q0, q0, r0
; CHECK-NEXT:    adr r4, .LCPI4_3
; CHECK-NEXT:    vcmp.u32 hi, q2, q0
; CHECK-NEXT:    vmov.i8 q0, #0x0
; CHECK-NEXT:    vpsel q3, q1, q0
; CHECK-NEXT:    vstrh.32 q3, [r5, #8]
; CHECK-NEXT:    vldrw.u32 q3, [r1]
; CHECK-NEXT:    adr r1, .LCPI4_2
; CHECK-NEXT:    vqadd.u32 q3, q3, r0
; CHECK-NEXT:    vcmp.u32 hi, q2, q3
; CHECK-NEXT:    vpsel q3, q1, q0
; CHECK-NEXT:    vstrh.32 q3, [r5]
; CHECK-NEXT:    vldrw.u32 q3, [r1]
; CHECK-NEXT:    mov r1, sp
; CHECK-NEXT:    vqadd.u32 q3, q3, r0
; CHECK-NEXT:    vcmp.u32 hi, q2, q3
; CHECK-NEXT:    vpsel q3, q1, q0
; CHECK-NEXT:    vstrh.32 q3, [r1, #8]
; CHECK-NEXT:    vldrw.u32 q3, [r4]
; CHECK-NEXT:    vqadd.u32 q3, q3, r0
; CHECK-NEXT:    add r0, sp, #32
; CHECK-NEXT:    vcmp.u32 hi, q2, q3
; CHECK-NEXT:    vpsel q2, q1, q0
; CHECK-NEXT:    vstrh.32 q2, [r1]
; CHECK-NEXT:    vldrw.u32 q2, [r5]
; CHECK-NEXT:    vcmp.i16 ne, q2, zr
; CHECK-NEXT:    vpsel q2, q1, q0
; CHECK-NEXT:    vstrb.16 q2, [r0, #8]
; CHECK-NEXT:    vldrw.u32 q2, [r1]
; CHECK-NEXT:    add r1, sp, #72
; CHECK-NEXT:    vcmp.i16 ne, q2, zr
; CHECK-NEXT:    vpsel q0, q1, q0
; CHECK-NEXT:    vldrw.u32 q1, [r1]
; CHECK-NEXT:    vstrb.16 q0, [r0]
; CHECK-NEXT:    vldr d1, [sp, #64]
; CHECK-NEXT:    vldrw.u32 q2, [r0]
; CHECK-NEXT:    vmov d0, r2, r3
; CHECK-NEXT:    vcmp.i8 ne, q2, zr
; CHECK-NEXT:    vpsel q0, q0, q1
; CHECK-NEXT:    vmov r0, r1, d0
; CHECK-NEXT:    vmov r2, r3, d1
; CHECK-NEXT:    add sp, #48
; CHECK-NEXT:    pop {r4, r5, r7, pc}
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI4_0:
; CHECK-NEXT:    .long 12 @ 0xc
; CHECK-NEXT:    .long 13 @ 0xd
; CHECK-NEXT:    .long 14 @ 0xe
; CHECK-NEXT:    .long 15 @ 0xf
; CHECK-NEXT:  .LCPI4_1:
; CHECK-NEXT:    .long 8 @ 0x8
; CHECK-NEXT:    .long 9 @ 0x9
; CHECK-NEXT:    .long 10 @ 0xa
; CHECK-NEXT:    .long 11 @ 0xb
; CHECK-NEXT:  .LCPI4_2:
; CHECK-NEXT:    .long 4 @ 0x4
; CHECK-NEXT:    .long 5 @ 0x5
; CHECK-NEXT:    .long 6 @ 0x6
; CHECK-NEXT:    .long 7 @ 0x7
; CHECK-NEXT:  .LCPI4_3:
; CHECK-NEXT:    .long 0 @ 0x0
; CHECK-NEXT:    .long 1 @ 0x1
; CHECK-NEXT:    .long 2 @ 0x2
; CHECK-NEXT:    .long 3 @ 0x3
  %active.lane.mask = call <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32 %index, i32 %TC)
  %select = select <16 x i1> %active.lane.mask, <16 x i8> %V1, <16 x i8> %V2
  ret <16 x i8> %select
}
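
; A width-2 tail-folded loop. The loop mask is a vctp.64 on the remaining
; element count, but the <2 x i32> masked load and store are expanded: the
; predicate is read back with vmrs and each lane becomes a conditional
; scalar ldr/str inside an IT block.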
define void @test_width2(ptr nocapture readnone %x, ptr nocapture %y, i8 zeroext %m) {
; CHECK-LABEL: test_width2:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    cmp r2, #0
; CHECK-NEXT:    beq .LBB5_3
; CHECK-NEXT:  @ %bb.1: @ %for.body.preheader
; CHECK-NEXT:    adds r0, r2, #1
; CHECK-NEXT:    movs r3, #1
; CHECK-NEXT:    bic r0, r0, #1
; CHECK-NEXT:    subs r0, #2
; CHECK-NEXT:    add.w r0, r3, r0, lsr #1
; CHECK-NEXT:    dls lr, r0
; CHECK-NEXT:  .LBB5_2: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vctp.64 r2
; CHECK-NEXT:    @ implicit-def: $q0
; CHECK-NEXT:    subs r2, #2
; CHECK-NEXT:    vmrs r3, p0
; CHECK-NEXT:    and r0, r3, #1
; CHECK-NEXT:    ubfx r3, r3, #8, #1
; CHECK-NEXT:    rsb.w r12, r0, #0
; CHECK-NEXT:    movs r0, #0
; CHECK-NEXT:    rsbs r3, r3, #0
; CHECK-NEXT:    bfi r0, r12, #0, #1
; CHECK-NEXT:    sub.w r12, r1, #8
; CHECK-NEXT:    bfi r0, r3, #1, #1
; CHECK-NEXT:    lsls r3, r0, #31
; CHECK-NEXT:    itt ne
; CHECK-NEXT:    ldrne.w r3, [r12]
; CHECK-NEXT:    vmovne.32 q0[0], r3
; CHECK-NEXT:    lsls r0, r0, #30
; CHECK-NEXT:    itt mi
; CHECK-NEXT:    ldrmi.w r0, [r12, #4]
; CHECK-NEXT:    vmovmi.32 q0[2], r0
; CHECK-NEXT:    vmrs r3, p0
; CHECK-NEXT:    and r0, r3, #1
; CHECK-NEXT:    ubfx r3, r3, #8, #1
; CHECK-NEXT:    rsb.w r12, r0, #0
; CHECK-NEXT:    movs r0, #0
; CHECK-NEXT:    rsbs r3, r3, #0
; CHECK-NEXT:    bfi r0, r12, #0, #1
; CHECK-NEXT:    bfi r0, r3, #1, #1
; CHECK-NEXT:    lsls r3, r0, #31
; CHECK-NEXT:    itt ne
; CHECK-NEXT:    vmovne r3, s0
; CHECK-NEXT:    strne r3, [r1]
; CHECK-NEXT:    lsls r0, r0, #30
; CHECK-NEXT:    itt mi
; CHECK-NEXT:    vmovmi r0, s2
; CHECK-NEXT:    strmi r0, [r1, #4]
; CHECK-NEXT:    adds r1, #8
; CHECK-NEXT:    le lr, .LBB5_2
; CHECK-NEXT:  .LBB5_3: @ %for.cond.cleanup
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop {r7, pc}
entry:
  %cmp9.not = icmp eq i8 %m, 0
  br i1 %cmp9.not, label %for.cond.cleanup, label %for.body.preheader
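
; The preheader rounds the scalar trip count up to an even number
; ((m + 1) & ~1) and halves it to form the dls/le hardware-loop trip count.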
for.body.preheader: ; preds = %entry
  %wide.trip.count = zext i8 %m to i32
  %n.rnd.up = add nuw nsw i32 %wide.trip.count, 1
  %n.vec = and i32 %n.rnd.up, 510
  br label %vector.body
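
; get.active.lane.mask(%index, %wide.trip.count) is recomputed on every
; iteration; it is lowered to the vctp.64 on the remaining count above.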
vector.body: ; preds = %vector.body, %for.body.preheader
  %index = phi i32 [ 0, %for.body.preheader ], [ %index.next, %vector.body ]
  %active.lane.mask = call <2 x i1> @llvm.get.active.lane.mask.v2i1.i32(i32 %index, i32 %wide.trip.count)
  %0 = add nsw i32 %index, -2
  %1 = getelementptr inbounds i32, ptr %y, i32 %0
  %wide.masked.load = call <2 x i32> @llvm.masked.load.v2i32.p0(ptr %1, i32 4, <2 x i1> %active.lane.mask, <2 x i32> undef)
  %2 = getelementptr inbounds i32, ptr %y, i32 %index
  call void @llvm.masked.store.v2i32.p0(<2 x i32> %wide.masked.load, ptr %2, i32 4, <2 x i1> %active.lane.mask)
  %index.next = add i32 %index, 2
  %3 = icmp eq i32 %index.next, %n.vec
  br i1 %3, label %for.cond.cleanup, label %vector.body

for.cond.cleanup: ; preds = %vector.body, %entry
  ret void
}

declare <2 x i1> @llvm.get.active.lane.mask.v2i1.i32(i32, i32)
declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)
declare <7 x i1> @llvm.get.active.lane.mask.v7i1.i32(i32, i32)
declare <8 x i1> @llvm.get.active.lane.mask.v8i1.i32(i32, i32)
declare <16 x i1> @llvm.get.active.lane.mask.v16i1.i32(i32, i32)
declare <2 x i32> @llvm.masked.load.v2i32.p0(ptr, i32, <2 x i1>, <2 x i32>)
declare void @llvm.masked.store.v2i32.p0(<2 x i32>, ptr, i32, <2 x i1>)