1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve,+fullfp16 -enable-arm-maskedldst -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-LE
3 ; RUN: llc -mtriple=thumbebv8.1m.main-arm-none-eabi -mattr=+mve,+fullfp16 -enable-arm-maskedldst -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-BE
5 define void @foo_v4i32_v4i32(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i32> *%src) {
6 ; CHECK-LABEL: foo_v4i32_v4i32:
7 ; CHECK: @ %bb.0: @ %entry
8 ; CHECK-NEXT: vldrw.u32 q0, [r1]
9 ; CHECK-NEXT: vptt.s32 gt, q0, zr
10 ; CHECK-NEXT: vldrwt.u32 q0, [r2]
11 ; CHECK-NEXT: vstrwt.32 q0, [r0]
14 %0 = load <4 x i32>, <4 x i32>* %mask, align 4
15 %1 = icmp sgt <4 x i32> %0, zeroinitializer
16 %2 = call <4 x i32> @llvm.masked.load.v4i32(<4 x i32>* %src, i32 4, <4 x i1> %1, <4 x i32> undef)
17 call void @llvm.masked.store.v4i32(<4 x i32> %2, <4 x i32>* %dest, i32 4, <4 x i1> %1)
21 define void @foo_sext_v4i32_v4i8(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i8> *%src) {
22 ; CHECK-LABEL: foo_sext_v4i32_v4i8:
23 ; CHECK: @ %bb.0: @ %entry
24 ; CHECK-NEXT: vldrw.u32 q0, [r1]
25 ; CHECK-NEXT: vptt.s32 gt, q0, zr
26 ; CHECK-NEXT: vldrbt.s32 q0, [r2]
27 ; CHECK-NEXT: vstrwt.32 q0, [r0]
30 %0 = load <4 x i32>, <4 x i32>* %mask, align 4
31 %1 = icmp sgt <4 x i32> %0, zeroinitializer
32 %2 = call <4 x i8> @llvm.masked.load.v4i8(<4 x i8>* %src, i32 1, <4 x i1> %1, <4 x i8> undef)
33 %3 = sext <4 x i8> %2 to <4 x i32>
34 call void @llvm.masked.store.v4i32(<4 x i32> %3, <4 x i32>* %dest, i32 4, <4 x i1> %1)
38 define void @foo_sext_v4i32_v4i16(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i16> *%src) {
39 ; CHECK-LABEL: foo_sext_v4i32_v4i16:
40 ; CHECK: @ %bb.0: @ %entry
41 ; CHECK-NEXT: vldrw.u32 q0, [r1]
42 ; CHECK-NEXT: vptt.s32 gt, q0, zr
43 ; CHECK-NEXT: vldrht.s32 q0, [r2]
44 ; CHECK-NEXT: vstrwt.32 q0, [r0]
47 %0 = load <4 x i32>, <4 x i32>* %mask, align 4
48 %1 = icmp sgt <4 x i32> %0, zeroinitializer
49 %2 = call <4 x i16> @llvm.masked.load.v4i16(<4 x i16>* %src, i32 2, <4 x i1> %1, <4 x i16> undef)
50 %3 = sext <4 x i16> %2 to <4 x i32>
51 call void @llvm.masked.store.v4i32(<4 x i32> %3, <4 x i32>* %dest, i32 4, <4 x i1> %1)
55 define void @foo_zext_v4i32_v4i8(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i8> *%src) {
56 ; CHECK-LABEL: foo_zext_v4i32_v4i8:
57 ; CHECK: @ %bb.0: @ %entry
58 ; CHECK-NEXT: vldrw.u32 q0, [r1]
59 ; CHECK-NEXT: vptt.s32 gt, q0, zr
60 ; CHECK-NEXT: vldrbt.u32 q0, [r2]
61 ; CHECK-NEXT: vstrwt.32 q0, [r0]
64 %0 = load <4 x i32>, <4 x i32>* %mask, align 4
65 %1 = icmp sgt <4 x i32> %0, zeroinitializer
66 %2 = call <4 x i8> @llvm.masked.load.v4i8(<4 x i8>* %src, i32 1, <4 x i1> %1, <4 x i8> undef)
67 %3 = zext <4 x i8> %2 to <4 x i32>
68 call void @llvm.masked.store.v4i32(<4 x i32> %3, <4 x i32>* %dest, i32 4, <4 x i1> %1)
72 define void @foo_zext_v4i32_v4i16(<4 x i32> *%dest, <4 x i32> *%mask, <4 x i16> *%src) {
73 ; CHECK-LABEL: foo_zext_v4i32_v4i16:
74 ; CHECK: @ %bb.0: @ %entry
75 ; CHECK-NEXT: vldrw.u32 q0, [r1]
76 ; CHECK-NEXT: vptt.s32 gt, q0, zr
77 ; CHECK-NEXT: vldrht.u32 q0, [r2]
78 ; CHECK-NEXT: vstrwt.32 q0, [r0]
81 %0 = load <4 x i32>, <4 x i32>* %mask, align 4
82 %1 = icmp sgt <4 x i32> %0, zeroinitializer
83 %2 = call <4 x i16> @llvm.masked.load.v4i16(<4 x i16>* %src, i32 2, <4 x i1> %1, <4 x i16> undef)
84 %3 = zext <4 x i16> %2 to <4 x i32>
85 call void @llvm.masked.store.v4i32(<4 x i32> %3, <4 x i32>* %dest, i32 4, <4 x i1> %1)
89 define void @foo_sext_v2i64_v2i32(<2 x i64> *%dest, <2 x i32> *%mask, <2 x i32> *%src) {
90 ; CHECK-LE-LABEL: foo_sext_v2i64_v2i32:
91 ; CHECK-LE: @ %bb.0: @ %entry
92 ; CHECK-LE-NEXT: .save {r4, r5, r7, lr}
93 ; CHECK-LE-NEXT: push {r4, r5, r7, lr}
94 ; CHECK-LE-NEXT: .pad #4
95 ; CHECK-LE-NEXT: sub sp, #4
96 ; CHECK-LE-NEXT: ldrd lr, r12, [r1]
97 ; CHECK-LE-NEXT: movs r1, #0
98 ; CHECK-LE-NEXT: @ implicit-def: $q1
99 ; CHECK-LE-NEXT: movs r4, #0
100 ; CHECK-LE-NEXT: rsbs.w r3, lr, #0
101 ; CHECK-LE-NEXT: vmov.32 q0[0], lr
102 ; CHECK-LE-NEXT: sbcs.w r3, r1, lr, asr #31
103 ; CHECK-LE-NEXT: mov.w lr, #0
104 ; CHECK-LE-NEXT: it lt
105 ; CHECK-LE-NEXT: movlt.w lr, #1
106 ; CHECK-LE-NEXT: rsbs.w r3, r12, #0
107 ; CHECK-LE-NEXT: sbcs.w r3, r1, r12, asr #31
108 ; CHECK-LE-NEXT: it lt
109 ; CHECK-LE-NEXT: movlt r1, #1
110 ; CHECK-LE-NEXT: cmp r1, #0
111 ; CHECK-LE-NEXT: it ne
112 ; CHECK-LE-NEXT: mvnne r1, #1
113 ; CHECK-LE-NEXT: bfi r1, lr, #0, #1
114 ; CHECK-LE-NEXT: vmov.32 q0[2], r12
115 ; CHECK-LE-NEXT: and r3, r1, #3
116 ; CHECK-LE-NEXT: lsls r1, r1, #31
117 ; CHECK-LE-NEXT: itt ne
118 ; CHECK-LE-NEXT: ldrne r1, [r2]
119 ; CHECK-LE-NEXT: vmovne.32 q1[0], r1
120 ; CHECK-LE-NEXT: lsls r1, r3, #30
121 ; CHECK-LE-NEXT: itt mi
122 ; CHECK-LE-NEXT: ldrmi r1, [r2, #4]
123 ; CHECK-LE-NEXT: vmovmi.32 q1[2], r1
124 ; CHECK-LE-NEXT: vmov r2, s0
125 ; CHECK-LE-NEXT: vmov r3, s4
126 ; CHECK-LE-NEXT: vmov r1, s6
127 ; CHECK-LE-NEXT: vmov.32 q1[0], r3
128 ; CHECK-LE-NEXT: rsbs r5, r2, #0
129 ; CHECK-LE-NEXT: sbcs.w r2, r4, r2, asr #31
130 ; CHECK-LE-NEXT: vmov r2, s2
131 ; CHECK-LE-NEXT: asr.w lr, r3, #31
132 ; CHECK-LE-NEXT: vmov.32 q1[1], lr
133 ; CHECK-LE-NEXT: asr.w r12, r1, #31
134 ; CHECK-LE-NEXT: vmov.32 q1[2], r1
135 ; CHECK-LE-NEXT: mov.w r1, #0
136 ; CHECK-LE-NEXT: it lt
137 ; CHECK-LE-NEXT: movlt r1, #1
138 ; CHECK-LE-NEXT: vmov.32 q1[3], r12
139 ; CHECK-LE-NEXT: rsbs r3, r2, #0
140 ; CHECK-LE-NEXT: sbcs.w r2, r4, r2, asr #31
141 ; CHECK-LE-NEXT: it lt
142 ; CHECK-LE-NEXT: movlt r4, #1
143 ; CHECK-LE-NEXT: cmp r4, #0
144 ; CHECK-LE-NEXT: it ne
145 ; CHECK-LE-NEXT: mvnne r4, #1
146 ; CHECK-LE-NEXT: bfi r4, r1, #0, #1
147 ; CHECK-LE-NEXT: and r1, r4, #3
148 ; CHECK-LE-NEXT: lsls r2, r4, #31
149 ; CHECK-LE-NEXT: it ne
150 ; CHECK-LE-NEXT: vstrne d2, [r0]
151 ; CHECK-LE-NEXT: lsls r1, r1, #30
152 ; CHECK-LE-NEXT: it mi
153 ; CHECK-LE-NEXT: vstrmi d3, [r0, #8]
154 ; CHECK-LE-NEXT: add sp, #4
155 ; CHECK-LE-NEXT: pop {r4, r5, r7, pc}
157 ; CHECK-BE-LABEL: foo_sext_v2i64_v2i32:
158 ; CHECK-BE: @ %bb.0: @ %entry
159 ; CHECK-BE-NEXT: .save {r4, r5, r7, lr}
160 ; CHECK-BE-NEXT: push {r4, r5, r7, lr}
161 ; CHECK-BE-NEXT: .pad #4
162 ; CHECK-BE-NEXT: sub sp, #4
163 ; CHECK-BE-NEXT: ldrd r12, lr, [r1]
164 ; CHECK-BE-NEXT: rsbs.w r1, lr, #0
165 ; CHECK-BE-NEXT: mov.w r3, #0
166 ; CHECK-BE-NEXT: sbcs.w r1, r3, lr, asr #31
167 ; CHECK-BE-NEXT: vmov.32 q0[1], r12
168 ; CHECK-BE-NEXT: @ implicit-def: $q2
169 ; CHECK-BE-NEXT: vmov.32 q0[3], lr
170 ; CHECK-BE-NEXT: mov.w lr, #0
171 ; CHECK-BE-NEXT: it lt
172 ; CHECK-BE-NEXT: movlt.w lr, #1
173 ; CHECK-BE-NEXT: rsbs.w r1, r12, #0
174 ; CHECK-BE-NEXT: sbcs.w r1, r3, r12, asr #31
175 ; CHECK-BE-NEXT: it lt
176 ; CHECK-BE-NEXT: movlt r3, #1
177 ; CHECK-BE-NEXT: cmp r3, #0
178 ; CHECK-BE-NEXT: it ne
179 ; CHECK-BE-NEXT: mvnne r3, #1
180 ; CHECK-BE-NEXT: bfi r3, lr, #0, #1
181 ; CHECK-BE-NEXT: and r1, r3, #3
182 ; CHECK-BE-NEXT: lsls r3, r3, #31
183 ; CHECK-BE-NEXT: beq .LBB5_2
184 ; CHECK-BE-NEXT: @ %bb.1: @ %cond.load
185 ; CHECK-BE-NEXT: ldr r3, [r2]
186 ; CHECK-BE-NEXT: vmov.32 q1[1], r3
187 ; CHECK-BE-NEXT: vrev64.32 q2, q1
188 ; CHECK-BE-NEXT: .LBB5_2: @ %else
189 ; CHECK-BE-NEXT: vrev64.32 q1, q0
190 ; CHECK-BE-NEXT: lsls r1, r1, #30
191 ; CHECK-BE-NEXT: bpl .LBB5_4
192 ; CHECK-BE-NEXT: @ %bb.3: @ %cond.load1
193 ; CHECK-BE-NEXT: ldr r1, [r2, #4]
194 ; CHECK-BE-NEXT: vrev64.32 q0, q2
195 ; CHECK-BE-NEXT: vmov.32 q0[3], r1
196 ; CHECK-BE-NEXT: vrev64.32 q2, q0
197 ; CHECK-BE-NEXT: .LBB5_4: @ %else2
198 ; CHECK-BE-NEXT: vrev64.32 q0, q2
199 ; CHECK-BE-NEXT: vrev64.32 q2, q1
200 ; CHECK-BE-NEXT: vmov r2, s11
201 ; CHECK-BE-NEXT: movs r4, #0
202 ; CHECK-BE-NEXT: vmov r3, s1
203 ; CHECK-BE-NEXT: vmov r1, s3
204 ; CHECK-BE-NEXT: rsbs r5, r2, #0
205 ; CHECK-BE-NEXT: sbcs.w r2, r4, r2, asr #31
206 ; CHECK-BE-NEXT: vmov r2, s9
207 ; CHECK-BE-NEXT: asr.w lr, r3, #31
208 ; CHECK-BE-NEXT: vmov.32 q1[0], lr
209 ; CHECK-BE-NEXT: asr.w r12, r1, #31
210 ; CHECK-BE-NEXT: vmov.32 q1[1], r3
211 ; CHECK-BE-NEXT: vmov.32 q1[2], r12
212 ; CHECK-BE-NEXT: vmov.32 q1[3], r1
213 ; CHECK-BE-NEXT: mov.w r1, #0
214 ; CHECK-BE-NEXT: it lt
215 ; CHECK-BE-NEXT: movlt r1, #1
216 ; CHECK-BE-NEXT: vrev64.32 q0, q1
217 ; CHECK-BE-NEXT: rsbs r3, r2, #0
218 ; CHECK-BE-NEXT: sbcs.w r2, r4, r2, asr #31
219 ; CHECK-BE-NEXT: it lt
220 ; CHECK-BE-NEXT: movlt r4, #1
221 ; CHECK-BE-NEXT: cmp r4, #0
222 ; CHECK-BE-NEXT: it ne
223 ; CHECK-BE-NEXT: mvnne r4, #1
224 ; CHECK-BE-NEXT: bfi r4, r1, #0, #1
225 ; CHECK-BE-NEXT: and r1, r4, #3
226 ; CHECK-BE-NEXT: lsls r2, r4, #31
227 ; CHECK-BE-NEXT: it ne
228 ; CHECK-BE-NEXT: vstrne d0, [r0]
229 ; CHECK-BE-NEXT: lsls r1, r1, #30
230 ; CHECK-BE-NEXT: it mi
231 ; CHECK-BE-NEXT: vstrmi d1, [r0, #8]
232 ; CHECK-BE-NEXT: add sp, #4
233 ; CHECK-BE-NEXT: pop {r4, r5, r7, pc}
235 %0 = load <2 x i32>, <2 x i32>* %mask, align 4
236 %1 = icmp sgt <2 x i32> %0, zeroinitializer
237 %2 = call <2 x i32> @llvm.masked.load.v2i32(<2 x i32>* %src, i32 4, <2 x i1> %1, <2 x i32> undef)
238 %3 = sext <2 x i32> %2 to <2 x i64>
239 call void @llvm.masked.store.v2i64(<2 x i64> %3, <2 x i64>* %dest, i32 8, <2 x i1> %1)
243 define void @foo_sext_v2i64_v2i32_unaligned(<2 x i64> *%dest, <2 x i32> *%mask, <2 x i32> *%src) {
244 ; CHECK-LE-LABEL: foo_sext_v2i64_v2i32_unaligned:
245 ; CHECK-LE: @ %bb.0: @ %entry
246 ; CHECK-LE-NEXT: .save {r4, r5, r7, lr}
247 ; CHECK-LE-NEXT: push {r4, r5, r7, lr}
248 ; CHECK-LE-NEXT: .pad #4
249 ; CHECK-LE-NEXT: sub sp, #4
250 ; CHECK-LE-NEXT: ldrd lr, r12, [r1]
251 ; CHECK-LE-NEXT: movs r1, #0
252 ; CHECK-LE-NEXT: @ implicit-def: $q1
253 ; CHECK-LE-NEXT: movs r4, #0
254 ; CHECK-LE-NEXT: rsbs.w r3, lr, #0
255 ; CHECK-LE-NEXT: vmov.32 q0[0], lr
256 ; CHECK-LE-NEXT: sbcs.w r3, r1, lr, asr #31
257 ; CHECK-LE-NEXT: mov.w lr, #0
258 ; CHECK-LE-NEXT: it lt
259 ; CHECK-LE-NEXT: movlt.w lr, #1
260 ; CHECK-LE-NEXT: rsbs.w r3, r12, #0
261 ; CHECK-LE-NEXT: sbcs.w r3, r1, r12, asr #31
262 ; CHECK-LE-NEXT: it lt
263 ; CHECK-LE-NEXT: movlt r1, #1
264 ; CHECK-LE-NEXT: cmp r1, #0
265 ; CHECK-LE-NEXT: it ne
266 ; CHECK-LE-NEXT: mvnne r1, #1
267 ; CHECK-LE-NEXT: bfi r1, lr, #0, #1
268 ; CHECK-LE-NEXT: vmov.32 q0[2], r12
269 ; CHECK-LE-NEXT: and r3, r1, #3
270 ; CHECK-LE-NEXT: lsls r1, r1, #31
271 ; CHECK-LE-NEXT: itt ne
272 ; CHECK-LE-NEXT: ldrne r1, [r2]
273 ; CHECK-LE-NEXT: vmovne.32 q1[0], r1
274 ; CHECK-LE-NEXT: lsls r1, r3, #30
275 ; CHECK-LE-NEXT: itt mi
276 ; CHECK-LE-NEXT: ldrmi r1, [r2, #4]
277 ; CHECK-LE-NEXT: vmovmi.32 q1[2], r1
278 ; CHECK-LE-NEXT: vmov r2, s0
279 ; CHECK-LE-NEXT: vmov r3, s4
280 ; CHECK-LE-NEXT: vmov r1, s6
281 ; CHECK-LE-NEXT: vmov.32 q1[0], r3
282 ; CHECK-LE-NEXT: rsbs r5, r2, #0
283 ; CHECK-LE-NEXT: sbcs.w r2, r4, r2, asr #31
284 ; CHECK-LE-NEXT: vmov r2, s2
285 ; CHECK-LE-NEXT: asr.w lr, r3, #31
286 ; CHECK-LE-NEXT: vmov.32 q1[1], lr
287 ; CHECK-LE-NEXT: asr.w r12, r1, #31
288 ; CHECK-LE-NEXT: vmov.32 q1[2], r1
289 ; CHECK-LE-NEXT: mov.w r1, #0
290 ; CHECK-LE-NEXT: it lt
291 ; CHECK-LE-NEXT: movlt r1, #1
292 ; CHECK-LE-NEXT: vmov.32 q1[3], r12
293 ; CHECK-LE-NEXT: rsbs r3, r2, #0
294 ; CHECK-LE-NEXT: sbcs.w r2, r4, r2, asr #31
295 ; CHECK-LE-NEXT: it lt
296 ; CHECK-LE-NEXT: movlt r4, #1
297 ; CHECK-LE-NEXT: cmp r4, #0
298 ; CHECK-LE-NEXT: it ne
299 ; CHECK-LE-NEXT: mvnne r4, #1
300 ; CHECK-LE-NEXT: bfi r4, r1, #0, #1
301 ; CHECK-LE-NEXT: and r1, r4, #3
302 ; CHECK-LE-NEXT: lsls r2, r4, #31
303 ; CHECK-LE-NEXT: itt ne
304 ; CHECK-LE-NEXT: vmovne r2, r3, d2
305 ; CHECK-LE-NEXT: strdne r2, r3, [r0]
306 ; CHECK-LE-NEXT: lsls r1, r1, #30
307 ; CHECK-LE-NEXT: itt mi
308 ; CHECK-LE-NEXT: vmovmi r1, r2, d3
309 ; CHECK-LE-NEXT: strdmi r1, r2, [r0, #8]
310 ; CHECK-LE-NEXT: add sp, #4
311 ; CHECK-LE-NEXT: pop {r4, r5, r7, pc}
313 ; CHECK-BE-LABEL: foo_sext_v2i64_v2i32_unaligned:
314 ; CHECK-BE: @ %bb.0: @ %entry
315 ; CHECK-BE-NEXT: .save {r4, r5, r7, lr}
316 ; CHECK-BE-NEXT: push {r4, r5, r7, lr}
317 ; CHECK-BE-NEXT: .pad #4
318 ; CHECK-BE-NEXT: sub sp, #4
319 ; CHECK-BE-NEXT: ldrd r12, lr, [r1]
320 ; CHECK-BE-NEXT: rsbs.w r1, lr, #0
321 ; CHECK-BE-NEXT: mov.w r3, #0
322 ; CHECK-BE-NEXT: sbcs.w r1, r3, lr, asr #31
323 ; CHECK-BE-NEXT: vmov.32 q0[1], r12
324 ; CHECK-BE-NEXT: @ implicit-def: $q2
325 ; CHECK-BE-NEXT: vmov.32 q0[3], lr
326 ; CHECK-BE-NEXT: mov.w lr, #0
327 ; CHECK-BE-NEXT: it lt
328 ; CHECK-BE-NEXT: movlt.w lr, #1
329 ; CHECK-BE-NEXT: rsbs.w r1, r12, #0
330 ; CHECK-BE-NEXT: sbcs.w r1, r3, r12, asr #31
331 ; CHECK-BE-NEXT: it lt
332 ; CHECK-BE-NEXT: movlt r3, #1
333 ; CHECK-BE-NEXT: cmp r3, #0
334 ; CHECK-BE-NEXT: it ne
335 ; CHECK-BE-NEXT: mvnne r3, #1
336 ; CHECK-BE-NEXT: bfi r3, lr, #0, #1
337 ; CHECK-BE-NEXT: and r1, r3, #3
338 ; CHECK-BE-NEXT: lsls r3, r3, #31
339 ; CHECK-BE-NEXT: beq .LBB6_2
340 ; CHECK-BE-NEXT: @ %bb.1: @ %cond.load
341 ; CHECK-BE-NEXT: ldr r3, [r2]
342 ; CHECK-BE-NEXT: vmov.32 q1[1], r3
343 ; CHECK-BE-NEXT: vrev64.32 q2, q1
344 ; CHECK-BE-NEXT: .LBB6_2: @ %else
345 ; CHECK-BE-NEXT: vrev64.32 q1, q0
346 ; CHECK-BE-NEXT: lsls r1, r1, #30
347 ; CHECK-BE-NEXT: bpl .LBB6_4
348 ; CHECK-BE-NEXT: @ %bb.3: @ %cond.load1
349 ; CHECK-BE-NEXT: ldr r1, [r2, #4]
350 ; CHECK-BE-NEXT: vrev64.32 q0, q2
351 ; CHECK-BE-NEXT: vmov.32 q0[3], r1
352 ; CHECK-BE-NEXT: vrev64.32 q2, q0
353 ; CHECK-BE-NEXT: .LBB6_4: @ %else2
354 ; CHECK-BE-NEXT: vrev64.32 q0, q2
355 ; CHECK-BE-NEXT: vrev64.32 q2, q1
356 ; CHECK-BE-NEXT: vmov r2, s11
357 ; CHECK-BE-NEXT: movs r4, #0
358 ; CHECK-BE-NEXT: vmov r3, s1
359 ; CHECK-BE-NEXT: vmov r1, s3
360 ; CHECK-BE-NEXT: rsbs r5, r2, #0
361 ; CHECK-BE-NEXT: sbcs.w r2, r4, r2, asr #31
362 ; CHECK-BE-NEXT: vmov r2, s9
363 ; CHECK-BE-NEXT: asr.w lr, r3, #31
364 ; CHECK-BE-NEXT: vmov.32 q1[0], lr
365 ; CHECK-BE-NEXT: asr.w r12, r1, #31
366 ; CHECK-BE-NEXT: vmov.32 q1[1], r3
367 ; CHECK-BE-NEXT: vmov.32 q1[2], r12
368 ; CHECK-BE-NEXT: vmov.32 q1[3], r1
369 ; CHECK-BE-NEXT: mov.w r1, #0
370 ; CHECK-BE-NEXT: it lt
371 ; CHECK-BE-NEXT: movlt r1, #1
372 ; CHECK-BE-NEXT: vrev64.32 q0, q1
373 ; CHECK-BE-NEXT: rsbs r3, r2, #0
374 ; CHECK-BE-NEXT: sbcs.w r2, r4, r2, asr #31
375 ; CHECK-BE-NEXT: it lt
376 ; CHECK-BE-NEXT: movlt r4, #1
377 ; CHECK-BE-NEXT: cmp r4, #0
378 ; CHECK-BE-NEXT: it ne
379 ; CHECK-BE-NEXT: mvnne r4, #1
380 ; CHECK-BE-NEXT: bfi r4, r1, #0, #1
381 ; CHECK-BE-NEXT: and r1, r4, #3
382 ; CHECK-BE-NEXT: lsls r2, r4, #31
383 ; CHECK-BE-NEXT: itt ne
384 ; CHECK-BE-NEXT: vmovne r2, r3, d0
385 ; CHECK-BE-NEXT: strdne r3, r2, [r0]
386 ; CHECK-BE-NEXT: lsls r1, r1, #30
387 ; CHECK-BE-NEXT: itt mi
388 ; CHECK-BE-NEXT: vmovmi r1, r2, d1
389 ; CHECK-BE-NEXT: strdmi r2, r1, [r0, #8]
390 ; CHECK-BE-NEXT: add sp, #4
391 ; CHECK-BE-NEXT: pop {r4, r5, r7, pc}
393 %0 = load <2 x i32>, <2 x i32>* %mask, align 4
394 %1 = icmp sgt <2 x i32> %0, zeroinitializer
395 %2 = call <2 x i32> @llvm.masked.load.v2i32(<2 x i32>* %src, i32 2, <2 x i1> %1, <2 x i32> undef)
396 %3 = sext <2 x i32> %2 to <2 x i64>
397 call void @llvm.masked.store.v2i64(<2 x i64> %3, <2 x i64>* %dest, i32 4, <2 x i1> %1)
401 define void @foo_zext_v2i64_v2i32(<2 x i64> *%dest, <2 x i32> *%mask, <2 x i32> *%src) {
402 ; CHECK-LE-LABEL: foo_zext_v2i64_v2i32:
403 ; CHECK-LE: @ %bb.0: @ %entry
404 ; CHECK-LE-NEXT: .save {r7, lr}
405 ; CHECK-LE-NEXT: push {r7, lr}
406 ; CHECK-LE-NEXT: .pad #4
407 ; CHECK-LE-NEXT: sub sp, #4
408 ; CHECK-LE-NEXT: ldrd lr, r12, [r1]
409 ; CHECK-LE-NEXT: movs r1, #0
410 ; CHECK-LE-NEXT: @ implicit-def: $q1
411 ; CHECK-LE-NEXT: rsbs.w r3, lr, #0
412 ; CHECK-LE-NEXT: vmov.32 q0[0], lr
413 ; CHECK-LE-NEXT: sbcs.w r3, r1, lr, asr #31
414 ; CHECK-LE-NEXT: mov.w lr, #0
415 ; CHECK-LE-NEXT: it lt
416 ; CHECK-LE-NEXT: movlt.w lr, #1
417 ; CHECK-LE-NEXT: rsbs.w r3, r12, #0
418 ; CHECK-LE-NEXT: sbcs.w r3, r1, r12, asr #31
419 ; CHECK-LE-NEXT: it lt
420 ; CHECK-LE-NEXT: movlt r1, #1
421 ; CHECK-LE-NEXT: cmp r1, #0
422 ; CHECK-LE-NEXT: it ne
423 ; CHECK-LE-NEXT: mvnne r1, #1
424 ; CHECK-LE-NEXT: bfi r1, lr, #0, #1
425 ; CHECK-LE-NEXT: vmov.32 q0[2], r12
426 ; CHECK-LE-NEXT: and r3, r1, #3
427 ; CHECK-LE-NEXT: adr.w r12, .LCPI7_0
428 ; CHECK-LE-NEXT: lsls r1, r1, #31
429 ; CHECK-LE-NEXT: itt ne
430 ; CHECK-LE-NEXT: ldrne r1, [r2]
431 ; CHECK-LE-NEXT: vmovne.32 q1[0], r1
432 ; CHECK-LE-NEXT: lsls r1, r3, #30
433 ; CHECK-LE-NEXT: vmov r3, s0
434 ; CHECK-LE-NEXT: itt mi
435 ; CHECK-LE-NEXT: ldrmi r1, [r2, #4]
436 ; CHECK-LE-NEXT: vmovmi.32 q1[2], r1
437 ; CHECK-LE-NEXT: movs r2, #0
438 ; CHECK-LE-NEXT: vldrw.u32 q2, [r12]
439 ; CHECK-LE-NEXT: mov.w r12, #0
440 ; CHECK-LE-NEXT: vand q1, q1, q2
441 ; CHECK-LE-NEXT: rsbs r1, r3, #0
442 ; CHECK-LE-NEXT: sbcs.w r1, r2, r3, asr #31
443 ; CHECK-LE-NEXT: vmov r3, s2
444 ; CHECK-LE-NEXT: it lt
445 ; CHECK-LE-NEXT: movlt.w r12, #1
446 ; CHECK-LE-NEXT: rsbs r1, r3, #0
447 ; CHECK-LE-NEXT: sbcs.w r1, r2, r3, asr #31
448 ; CHECK-LE-NEXT: it lt
449 ; CHECK-LE-NEXT: movlt r2, #1
450 ; CHECK-LE-NEXT: cmp r2, #0
451 ; CHECK-LE-NEXT: it ne
452 ; CHECK-LE-NEXT: mvnne r2, #1
453 ; CHECK-LE-NEXT: bfi r2, r12, #0, #1
454 ; CHECK-LE-NEXT: and r1, r2, #3
455 ; CHECK-LE-NEXT: lsls r2, r2, #31
456 ; CHECK-LE-NEXT: it ne
457 ; CHECK-LE-NEXT: vstrne d2, [r0]
458 ; CHECK-LE-NEXT: lsls r1, r1, #30
459 ; CHECK-LE-NEXT: it mi
460 ; CHECK-LE-NEXT: vstrmi d3, [r0, #8]
461 ; CHECK-LE-NEXT: add sp, #4
462 ; CHECK-LE-NEXT: pop {r7, pc}
463 ; CHECK-LE-NEXT: .p2align 4
464 ; CHECK-LE-NEXT: @ %bb.1:
465 ; CHECK-LE-NEXT: .LCPI7_0:
466 ; CHECK-LE-NEXT: .long 4294967295 @ 0xffffffff
467 ; CHECK-LE-NEXT: .long 0 @ 0x0
468 ; CHECK-LE-NEXT: .long 4294967295 @ 0xffffffff
469 ; CHECK-LE-NEXT: .long 0 @ 0x0
471 ; CHECK-BE-LABEL: foo_zext_v2i64_v2i32:
472 ; CHECK-BE: @ %bb.0: @ %entry
473 ; CHECK-BE-NEXT: .save {r7, lr}
474 ; CHECK-BE-NEXT: push {r7, lr}
475 ; CHECK-BE-NEXT: .pad #4
476 ; CHECK-BE-NEXT: sub sp, #4
477 ; CHECK-BE-NEXT: ldrd r12, lr, [r1]
478 ; CHECK-BE-NEXT: rsbs.w r1, lr, #0
479 ; CHECK-BE-NEXT: mov.w r3, #0
480 ; CHECK-BE-NEXT: sbcs.w r1, r3, lr, asr #31
481 ; CHECK-BE-NEXT: vmov.32 q0[1], r12
482 ; CHECK-BE-NEXT: @ implicit-def: $q1
483 ; CHECK-BE-NEXT: vmov.32 q0[3], lr
484 ; CHECK-BE-NEXT: mov.w lr, #0
485 ; CHECK-BE-NEXT: it lt
486 ; CHECK-BE-NEXT: movlt.w lr, #1
487 ; CHECK-BE-NEXT: rsbs.w r1, r12, #0
488 ; CHECK-BE-NEXT: sbcs.w r1, r3, r12, asr #31
489 ; CHECK-BE-NEXT: it lt
490 ; CHECK-BE-NEXT: movlt r3, #1
491 ; CHECK-BE-NEXT: cmp r3, #0
492 ; CHECK-BE-NEXT: it ne
493 ; CHECK-BE-NEXT: mvnne r3, #1
494 ; CHECK-BE-NEXT: bfi r3, lr, #0, #1
495 ; CHECK-BE-NEXT: and r1, r3, #3
496 ; CHECK-BE-NEXT: lsls r3, r3, #31
497 ; CHECK-BE-NEXT: beq .LBB7_2
498 ; CHECK-BE-NEXT: @ %bb.1: @ %cond.load
499 ; CHECK-BE-NEXT: ldr r3, [r2]
500 ; CHECK-BE-NEXT: vmov.32 q2[1], r3
501 ; CHECK-BE-NEXT: vrev64.32 q1, q2
502 ; CHECK-BE-NEXT: .LBB7_2: @ %else
503 ; CHECK-BE-NEXT: vrev64.32 q2, q0
504 ; CHECK-BE-NEXT: lsls r1, r1, #30
505 ; CHECK-BE-NEXT: bpl .LBB7_4
506 ; CHECK-BE-NEXT: @ %bb.3: @ %cond.load1
507 ; CHECK-BE-NEXT: ldr r1, [r2, #4]
508 ; CHECK-BE-NEXT: vrev64.32 q0, q1
509 ; CHECK-BE-NEXT: vmov.32 q0[3], r1
510 ; CHECK-BE-NEXT: vrev64.32 q1, q0
511 ; CHECK-BE-NEXT: .LBB7_4: @ %else2
512 ; CHECK-BE-NEXT: vrev64.32 q3, q2
513 ; CHECK-BE-NEXT: movs r2, #0
514 ; CHECK-BE-NEXT: vmov r3, s15
515 ; CHECK-BE-NEXT: adr.w r12, .LCPI7_0
516 ; CHECK-BE-NEXT: vldrb.u8 q0, [r12]
517 ; CHECK-BE-NEXT: mov.w r12, #0
518 ; CHECK-BE-NEXT: vrev64.8 q2, q0
519 ; CHECK-BE-NEXT: vand q0, q1, q2
520 ; CHECK-BE-NEXT: rsbs r1, r3, #0
521 ; CHECK-BE-NEXT: sbcs.w r1, r2, r3, asr #31
522 ; CHECK-BE-NEXT: vmov r3, s13
523 ; CHECK-BE-NEXT: it lt
524 ; CHECK-BE-NEXT: movlt.w r12, #1
525 ; CHECK-BE-NEXT: rsbs r1, r3, #0
526 ; CHECK-BE-NEXT: sbcs.w r1, r2, r3, asr #31
527 ; CHECK-BE-NEXT: it lt
528 ; CHECK-BE-NEXT: movlt r2, #1
529 ; CHECK-BE-NEXT: cmp r2, #0
530 ; CHECK-BE-NEXT: it ne
531 ; CHECK-BE-NEXT: mvnne r2, #1
532 ; CHECK-BE-NEXT: bfi r2, r12, #0, #1
533 ; CHECK-BE-NEXT: and r1, r2, #3
534 ; CHECK-BE-NEXT: lsls r2, r2, #31
535 ; CHECK-BE-NEXT: it ne
536 ; CHECK-BE-NEXT: vstrne d0, [r0]
537 ; CHECK-BE-NEXT: lsls r1, r1, #30
538 ; CHECK-BE-NEXT: it mi
539 ; CHECK-BE-NEXT: vstrmi d1, [r0, #8]
540 ; CHECK-BE-NEXT: add sp, #4
541 ; CHECK-BE-NEXT: pop {r7, pc}
542 ; CHECK-BE-NEXT: .p2align 4
543 ; CHECK-BE-NEXT: @ %bb.5:
544 ; CHECK-BE-NEXT: .LCPI7_0:
545 ; CHECK-BE-NEXT: .long 0 @ 0x0
546 ; CHECK-BE-NEXT: .long 4294967295 @ 0xffffffff
547 ; CHECK-BE-NEXT: .long 0 @ 0x0
548 ; CHECK-BE-NEXT: .long 4294967295 @ 0xffffffff
550 %0 = load <2 x i32>, <2 x i32>* %mask, align 4
551 %1 = icmp sgt <2 x i32> %0, zeroinitializer
552 %2 = call <2 x i32> @llvm.masked.load.v2i32(<2 x i32>* %src, i32 4, <2 x i1> %1, <2 x i32> undef)
553 %3 = zext <2 x i32> %2 to <2 x i64>
554 call void @llvm.masked.store.v2i64(<2 x i64> %3, <2 x i64>* %dest, i32 8, <2 x i1> %1)
558 define void @foo_zext_v2i64_v2i32_unaligned(<2 x i64> *%dest, <2 x i32> *%mask, <2 x i32> *%src) {
559 ; CHECK-LE-LABEL: foo_zext_v2i64_v2i32_unaligned:
560 ; CHECK-LE: @ %bb.0: @ %entry
561 ; CHECK-LE-NEXT: .save {r7, lr}
562 ; CHECK-LE-NEXT: push {r7, lr}
563 ; CHECK-LE-NEXT: .pad #4
564 ; CHECK-LE-NEXT: sub sp, #4
565 ; CHECK-LE-NEXT: ldrd lr, r12, [r1]
566 ; CHECK-LE-NEXT: movs r1, #0
567 ; CHECK-LE-NEXT: @ implicit-def: $q1
568 ; CHECK-LE-NEXT: rsbs.w r3, lr, #0
569 ; CHECK-LE-NEXT: vmov.32 q0[0], lr
570 ; CHECK-LE-NEXT: sbcs.w r3, r1, lr, asr #31
571 ; CHECK-LE-NEXT: mov.w lr, #0
572 ; CHECK-LE-NEXT: it lt
573 ; CHECK-LE-NEXT: movlt.w lr, #1
574 ; CHECK-LE-NEXT: rsbs.w r3, r12, #0
575 ; CHECK-LE-NEXT: sbcs.w r3, r1, r12, asr #31
576 ; CHECK-LE-NEXT: it lt
577 ; CHECK-LE-NEXT: movlt r1, #1
578 ; CHECK-LE-NEXT: cmp r1, #0
579 ; CHECK-LE-NEXT: it ne
580 ; CHECK-LE-NEXT: mvnne r1, #1
581 ; CHECK-LE-NEXT: bfi r1, lr, #0, #1
582 ; CHECK-LE-NEXT: vmov.32 q0[2], r12
583 ; CHECK-LE-NEXT: and r3, r1, #3
584 ; CHECK-LE-NEXT: adr.w r12, .LCPI8_0
585 ; CHECK-LE-NEXT: lsls r1, r1, #31
586 ; CHECK-LE-NEXT: itt ne
587 ; CHECK-LE-NEXT: ldrne r1, [r2]
588 ; CHECK-LE-NEXT: vmovne.32 q1[0], r1
589 ; CHECK-LE-NEXT: lsls r1, r3, #30
590 ; CHECK-LE-NEXT: vmov r3, s0
591 ; CHECK-LE-NEXT: itt mi
592 ; CHECK-LE-NEXT: ldrmi r1, [r2, #4]
593 ; CHECK-LE-NEXT: vmovmi.32 q1[2], r1
594 ; CHECK-LE-NEXT: movs r2, #0
595 ; CHECK-LE-NEXT: vldrw.u32 q2, [r12]
596 ; CHECK-LE-NEXT: mov.w r12, #0
597 ; CHECK-LE-NEXT: vand q1, q1, q2
598 ; CHECK-LE-NEXT: rsbs r1, r3, #0
599 ; CHECK-LE-NEXT: sbcs.w r1, r2, r3, asr #31
600 ; CHECK-LE-NEXT: vmov r3, s2
601 ; CHECK-LE-NEXT: it lt
602 ; CHECK-LE-NEXT: movlt.w r12, #1
603 ; CHECK-LE-NEXT: rsbs r1, r3, #0
604 ; CHECK-LE-NEXT: sbcs.w r1, r2, r3, asr #31
605 ; CHECK-LE-NEXT: it lt
606 ; CHECK-LE-NEXT: movlt r2, #1
607 ; CHECK-LE-NEXT: cmp r2, #0
608 ; CHECK-LE-NEXT: it ne
609 ; CHECK-LE-NEXT: mvnne r2, #1
610 ; CHECK-LE-NEXT: bfi r2, r12, #0, #1
611 ; CHECK-LE-NEXT: and r1, r2, #3
612 ; CHECK-LE-NEXT: lsls r2, r2, #31
613 ; CHECK-LE-NEXT: itt ne
614 ; CHECK-LE-NEXT: vmovne r2, r3, d2
615 ; CHECK-LE-NEXT: strdne r2, r3, [r0]
616 ; CHECK-LE-NEXT: lsls r1, r1, #30
617 ; CHECK-LE-NEXT: itt mi
618 ; CHECK-LE-NEXT: vmovmi r1, r2, d3
619 ; CHECK-LE-NEXT: strdmi r1, r2, [r0, #8]
620 ; CHECK-LE-NEXT: add sp, #4
621 ; CHECK-LE-NEXT: pop {r7, pc}
622 ; CHECK-LE-NEXT: .p2align 4
623 ; CHECK-LE-NEXT: @ %bb.1:
624 ; CHECK-LE-NEXT: .LCPI8_0:
625 ; CHECK-LE-NEXT: .long 4294967295 @ 0xffffffff
626 ; CHECK-LE-NEXT: .long 0 @ 0x0
627 ; CHECK-LE-NEXT: .long 4294967295 @ 0xffffffff
628 ; CHECK-LE-NEXT: .long 0 @ 0x0
630 ; CHECK-BE-LABEL: foo_zext_v2i64_v2i32_unaligned:
631 ; CHECK-BE: @ %bb.0: @ %entry
632 ; CHECK-BE-NEXT: .save {r7, lr}
633 ; CHECK-BE-NEXT: push {r7, lr}
634 ; CHECK-BE-NEXT: .pad #4
635 ; CHECK-BE-NEXT: sub sp, #4
636 ; CHECK-BE-NEXT: ldrd r12, lr, [r1]
637 ; CHECK-BE-NEXT: rsbs.w r1, lr, #0
638 ; CHECK-BE-NEXT: mov.w r3, #0
639 ; CHECK-BE-NEXT: sbcs.w r1, r3, lr, asr #31
640 ; CHECK-BE-NEXT: vmov.32 q0[1], r12
641 ; CHECK-BE-NEXT: @ implicit-def: $q1
642 ; CHECK-BE-NEXT: vmov.32 q0[3], lr
643 ; CHECK-BE-NEXT: mov.w lr, #0
644 ; CHECK-BE-NEXT: it lt
645 ; CHECK-BE-NEXT: movlt.w lr, #1
646 ; CHECK-BE-NEXT: rsbs.w r1, r12, #0
647 ; CHECK-BE-NEXT: sbcs.w r1, r3, r12, asr #31
648 ; CHECK-BE-NEXT: it lt
649 ; CHECK-BE-NEXT: movlt r3, #1
650 ; CHECK-BE-NEXT: cmp r3, #0
651 ; CHECK-BE-NEXT: it ne
652 ; CHECK-BE-NEXT: mvnne r3, #1
653 ; CHECK-BE-NEXT: bfi r3, lr, #0, #1
654 ; CHECK-BE-NEXT: and r1, r3, #3
655 ; CHECK-BE-NEXT: lsls r3, r3, #31
656 ; CHECK-BE-NEXT: beq .LBB8_2
657 ; CHECK-BE-NEXT: @ %bb.1: @ %cond.load
658 ; CHECK-BE-NEXT: ldr r3, [r2]
659 ; CHECK-BE-NEXT: vmov.32 q2[1], r3
660 ; CHECK-BE-NEXT: vrev64.32 q1, q2
661 ; CHECK-BE-NEXT: .LBB8_2: @ %else
662 ; CHECK-BE-NEXT: vrev64.32 q2, q0
663 ; CHECK-BE-NEXT: lsls r1, r1, #30
664 ; CHECK-BE-NEXT: bpl .LBB8_4
665 ; CHECK-BE-NEXT: @ %bb.3: @ %cond.load1
666 ; CHECK-BE-NEXT: ldr r1, [r2, #4]
667 ; CHECK-BE-NEXT: vrev64.32 q0, q1
668 ; CHECK-BE-NEXT: vmov.32 q0[3], r1
669 ; CHECK-BE-NEXT: vrev64.32 q1, q0
670 ; CHECK-BE-NEXT: .LBB8_4: @ %else2
671 ; CHECK-BE-NEXT: vrev64.32 q3, q2
672 ; CHECK-BE-NEXT: movs r2, #0
673 ; CHECK-BE-NEXT: vmov r3, s15
674 ; CHECK-BE-NEXT: adr.w r12, .LCPI8_0
675 ; CHECK-BE-NEXT: vldrb.u8 q0, [r12]
676 ; CHECK-BE-NEXT: mov.w r12, #0
677 ; CHECK-BE-NEXT: vrev64.8 q2, q0
678 ; CHECK-BE-NEXT: vand q0, q1, q2
679 ; CHECK-BE-NEXT: rsbs r1, r3, #0
680 ; CHECK-BE-NEXT: sbcs.w r1, r2, r3, asr #31
681 ; CHECK-BE-NEXT: vmov r3, s13
682 ; CHECK-BE-NEXT: it lt
683 ; CHECK-BE-NEXT: movlt.w r12, #1
684 ; CHECK-BE-NEXT: rsbs r1, r3, #0
685 ; CHECK-BE-NEXT: sbcs.w r1, r2, r3, asr #31
686 ; CHECK-BE-NEXT: it lt
687 ; CHECK-BE-NEXT: movlt r2, #1
688 ; CHECK-BE-NEXT: cmp r2, #0
689 ; CHECK-BE-NEXT: it ne
690 ; CHECK-BE-NEXT: mvnne r2, #1
691 ; CHECK-BE-NEXT: bfi r2, r12, #0, #1
692 ; CHECK-BE-NEXT: and r1, r2, #3
693 ; CHECK-BE-NEXT: lsls r2, r2, #31
694 ; CHECK-BE-NEXT: itt ne
695 ; CHECK-BE-NEXT: vmovne r2, r3, d0
696 ; CHECK-BE-NEXT: strdne r3, r2, [r0]
697 ; CHECK-BE-NEXT: lsls r1, r1, #30
698 ; CHECK-BE-NEXT: itt mi
699 ; CHECK-BE-NEXT: vmovmi r1, r2, d1
700 ; CHECK-BE-NEXT: strdmi r2, r1, [r0, #8]
701 ; CHECK-BE-NEXT: add sp, #4
702 ; CHECK-BE-NEXT: pop {r7, pc}
703 ; CHECK-BE-NEXT: .p2align 4
704 ; CHECK-BE-NEXT: @ %bb.5:
705 ; CHECK-BE-NEXT: .LCPI8_0:
706 ; CHECK-BE-NEXT: .long 0 @ 0x0
707 ; CHECK-BE-NEXT: .long 4294967295 @ 0xffffffff
708 ; CHECK-BE-NEXT: .long 0 @ 0x0
709 ; CHECK-BE-NEXT: .long 4294967295 @ 0xffffffff
711 %0 = load <2 x i32>, <2 x i32>* %mask, align 4
712 %1 = icmp sgt <2 x i32> %0, zeroinitializer
713 %2 = call <2 x i32> @llvm.masked.load.v2i32(<2 x i32>* %src, i32 2, <2 x i1> %1, <2 x i32> undef)
714 %3 = zext <2 x i32> %2 to <2 x i64>
715 call void @llvm.masked.store.v2i64(<2 x i64> %3, <2 x i64>* %dest, i32 4, <2 x i1> %1)
719 define void @foo_v8i16_v8i16(<8 x i16> *%dest, <8 x i16> *%mask, <8 x i16> *%src) {
720 ; CHECK-LABEL: foo_v8i16_v8i16:
721 ; CHECK: @ %bb.0: @ %entry
722 ; CHECK-NEXT: vldrh.u16 q0, [r1]
723 ; CHECK-NEXT: vptt.s16 gt, q0, zr
724 ; CHECK-NEXT: vldrht.u16 q0, [r2]
725 ; CHECK-NEXT: vstrht.16 q0, [r0]
728 %0 = load <8 x i16>, <8 x i16>* %mask, align 2
729 %1 = icmp sgt <8 x i16> %0, zeroinitializer
730 %2 = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %src, i32 2, <8 x i1> %1, <8 x i16> undef)
731 call void @llvm.masked.store.v8i16(<8 x i16> %2, <8 x i16>* %dest, i32 2, <8 x i1> %1)
735 define void @foo_sext_v8i16_v8i8(<8 x i16> *%dest, <8 x i16> *%mask, <8 x i8> *%src) {
736 ; CHECK-LABEL: foo_sext_v8i16_v8i8:
737 ; CHECK: @ %bb.0: @ %entry
738 ; CHECK-NEXT: vldrh.u16 q0, [r1]
739 ; CHECK-NEXT: vptt.s16 gt, q0, zr
740 ; CHECK-NEXT: vldrbt.s16 q0, [r2]
741 ; CHECK-NEXT: vstrht.16 q0, [r0]
744 %0 = load <8 x i16>, <8 x i16>* %mask, align 2
745 %1 = icmp sgt <8 x i16> %0, zeroinitializer
746 %2 = call <8 x i8> @llvm.masked.load.v8i8(<8 x i8>* %src, i32 1, <8 x i1> %1, <8 x i8> undef)
747 %3 = sext <8 x i8> %2 to <8 x i16>
748 call void @llvm.masked.store.v8i16(<8 x i16> %3, <8 x i16>* %dest, i32 2, <8 x i1> %1)
752 define void @foo_zext_v8i16_v8i8(<8 x i16> *%dest, <8 x i16> *%mask, <8 x i8> *%src) {
753 ; CHECK-LABEL: foo_zext_v8i16_v8i8:
754 ; CHECK: @ %bb.0: @ %entry
755 ; CHECK-NEXT: vldrh.u16 q0, [r1]
756 ; CHECK-NEXT: vptt.s16 gt, q0, zr
757 ; CHECK-NEXT: vldrbt.u16 q0, [r2]
758 ; CHECK-NEXT: vstrht.16 q0, [r0]
761 %0 = load <8 x i16>, <8 x i16>* %mask, align 2
762 %1 = icmp sgt <8 x i16> %0, zeroinitializer
763 %2 = call <8 x i8> @llvm.masked.load.v8i8(<8 x i8>* %src, i32 1, <8 x i1> %1, <8 x i8> undef)
764 %3 = zext <8 x i8> %2 to <8 x i16>
765 call void @llvm.masked.store.v8i16(<8 x i16> %3, <8 x i16>* %dest, i32 2, <8 x i1> %1)
769 define void @foo_v16i8_v16i8(<16 x i8> *%dest, <16 x i8> *%mask, <16 x i8> *%src) {
770 ; CHECK-LABEL: foo_v16i8_v16i8:
771 ; CHECK: @ %bb.0: @ %entry
772 ; CHECK-NEXT: vldrb.u8 q0, [r1]
773 ; CHECK-NEXT: vptt.s8 gt, q0, zr
774 ; CHECK-NEXT: vldrbt.u8 q0, [r2]
775 ; CHECK-NEXT: vstrbt.8 q0, [r0]
778 %0 = load <16 x i8>, <16 x i8>* %mask, align 1
779 %1 = icmp sgt <16 x i8> %0, zeroinitializer
780 %2 = call <16 x i8> @llvm.masked.load.v16i8(<16 x i8>* %src, i32 1, <16 x i1> %1, <16 x i8> undef)
781 call void @llvm.masked.store.v16i8(<16 x i8> %2, <16 x i8>* %dest, i32 1, <16 x i1> %1)
785 define void @foo_trunc_v8i8_v8i16(<8 x i8> *%dest, <8 x i16> *%mask, <8 x i16> *%src) {
786 ; CHECK-LABEL: foo_trunc_v8i8_v8i16:
787 ; CHECK: @ %bb.0: @ %entry
788 ; CHECK-NEXT: vldrh.u16 q0, [r1]
789 ; CHECK-NEXT: vptt.s16 gt, q0, zr
790 ; CHECK-NEXT: vldrht.u16 q0, [r2]
791 ; CHECK-NEXT: vstrbt.16 q0, [r0]
794 %0 = load <8 x i16>, <8 x i16>* %mask, align 2
795 %1 = icmp sgt <8 x i16> %0, zeroinitializer
796 %2 = call <8 x i16> @llvm.masked.load.v8i16(<8 x i16>* %src, i32 2, <8 x i1> %1, <8 x i16> undef)
797 %3 = trunc <8 x i16> %2 to <8 x i8>
798 call void @llvm.masked.store.v8i8(<8 x i8> %3, <8 x i8>* %dest, i32 1, <8 x i1> %1)
802 define void @foo_trunc_v4i8_v4i32(<4 x i8> *%dest, <4 x i32> *%mask, <4 x i32> *%src) {
803 ; CHECK-LABEL: foo_trunc_v4i8_v4i32:
804 ; CHECK: @ %bb.0: @ %entry
805 ; CHECK-NEXT: vldrw.u32 q0, [r1]
806 ; CHECK-NEXT: vptt.s32 gt, q0, zr
807 ; CHECK-NEXT: vldrwt.u32 q0, [r2]
808 ; CHECK-NEXT: vstrbt.32 q0, [r0]
811 %0 = load <4 x i32>, <4 x i32>* %mask, align 4
812 %1 = icmp sgt <4 x i32> %0, zeroinitializer
813 %2 = call <4 x i32> @llvm.masked.load.v4i32(<4 x i32>* %src, i32 4, <4 x i1> %1, <4 x i32> undef)
814 %3 = trunc <4 x i32> %2 to <4 x i8>
815 call void @llvm.masked.store.v4i8(<4 x i8> %3, <4 x i8>* %dest, i32 1, <4 x i1> %1)
819 define void @foo_trunc_v4i16_v4i32(<4 x i16> *%dest, <4 x i32> *%mask, <4 x i32> *%src) {
820 ; CHECK-LABEL: foo_trunc_v4i16_v4i32:
821 ; CHECK: @ %bb.0: @ %entry
822 ; CHECK-NEXT: vldrw.u32 q0, [r1]
823 ; CHECK-NEXT: vptt.s32 gt, q0, zr
824 ; CHECK-NEXT: vldrwt.u32 q0, [r2]
825 ; CHECK-NEXT: vstrht.32 q0, [r0]
828 %0 = load <4 x i32>, <4 x i32>* %mask, align 4
829 %1 = icmp sgt <4 x i32> %0, zeroinitializer
830 %2 = call <4 x i32> @llvm.masked.load.v4i32(<4 x i32>* %src, i32 4, <4 x i1> %1, <4 x i32> undef)
831 %3 = trunc <4 x i32> %2 to <4 x i16>
832 call void @llvm.masked.store.v4i16(<4 x i16> %3, <4 x i16>* %dest, i32 2, <4 x i1> %1)
836 define void @foo_v4f32_v4f32(<4 x float> *%dest, <4 x i32> *%mask, <4 x float> *%src) {
837 ; CHECK-LABEL: foo_v4f32_v4f32:
838 ; CHECK: @ %bb.0: @ %entry
839 ; CHECK-NEXT: vldrw.u32 q0, [r1]
840 ; CHECK-NEXT: vptt.s32 gt, q0, zr
841 ; CHECK-NEXT: vldrwt.u32 q0, [r2]
842 ; CHECK-NEXT: vstrwt.32 q0, [r0]
845 %0 = load <4 x i32>, <4 x i32>* %mask, align 4
846 %1 = icmp sgt <4 x i32> %0, zeroinitializer
847 %2 = call <4 x float> @llvm.masked.load.v4f32(<4 x float>* %src, i32 4, <4 x i1> %1, <4 x float> undef)
848 call void @llvm.masked.store.v4f32(<4 x float> %2, <4 x float>* %dest, i32 4, <4 x i1> %1)
; Same-width half case: v8f16 masked load feeding a v8f16 masked store.
; Expected to lower to predicated vldrht.u16/vstrht.16 under one VPT block,
; with the predicate formed by a .s16 greater-than-zero compare of the mask.
852 define void @foo_v8f16_v8f16(<8 x half> *%dest, <8 x i16> *%mask, <8 x half> *%src) {
853 ; CHECK-LABEL: foo_v8f16_v8f16:
854 ; CHECK: @ %bb.0: @ %entry
855 ; CHECK-NEXT: vldrh.u16 q0, [r1]
856 ; CHECK-NEXT: vptt.s16 gt, q0, zr
857 ; CHECK-NEXT: vldrht.u16 q0, [r2]
858 ; CHECK-NEXT: vstrht.16 q0, [r0]
861 %0 = load <8 x i16>, <8 x i16>* %mask, align 2
862 %1 = icmp sgt <8 x i16> %0, zeroinitializer
863 %2 = call <8 x half> @llvm.masked.load.v8f16(<8 x half>* %src, i32 2, <8 x i1> %1, <8 x half> undef)
864 call void @llvm.masked.store.v8f16(<8 x half> %2, <8 x half>* %dest, i32 2, <8 x i1> %1)
; Extending float case: v4f16 masked load fpext'ed to v4f32, then masked-stored.
; This combination is NOT lowered to predicated MVE vector instructions; the
; expected output below is a fully scalarized expansion: the predicate is moved
; to a GPR (vmrs), individual lane bits are extracted (and/ubfx) and packed
; (bfi), each active lane is loaded with a scalar vldr.16 and widened with
; vcvtb.f32.f16, and the store side spills each lane to the stack and
; conditionally copies it out word-by-word inside ittt blocks.
868 define void @foo_v4f32_v4f16(<4 x float> *%dest, <4 x i16> *%mask, <4 x half> *%src) {
869 ; CHECK-LABEL: foo_v4f32_v4f16:
870 ; CHECK: @ %bb.0: @ %entry
871 ; CHECK-NEXT: .save {r7, lr}
872 ; CHECK-NEXT: push {r7, lr}
873 ; CHECK-NEXT: .pad #24
874 ; CHECK-NEXT: sub sp, #24
875 ; CHECK-NEXT: vldrh.s32 q0, [r1]
876 ; CHECK-NEXT: mov.w lr, #0
877 ; CHECK-NEXT: @ implicit-def: $q1
878 ; CHECK-NEXT: vcmp.s32 gt, q0, zr
879 ; CHECK-NEXT: vmrs r3, p0
880 ; CHECK-NEXT: and r1, r3, #1
881 ; CHECK-NEXT: rsb.w r12, r1, #0
882 ; CHECK-NEXT: ubfx r1, r3, #4, #1
883 ; CHECK-NEXT: bfi lr, r12, #0, #1
884 ; CHECK-NEXT: rsbs r1, r1, #0
885 ; CHECK-NEXT: bfi lr, r1, #1, #1
886 ; CHECK-NEXT: ubfx r1, r3, #8, #1
887 ; CHECK-NEXT: rsbs r1, r1, #0
888 ; CHECK-NEXT: bfi lr, r1, #2, #1
889 ; CHECK-NEXT: ubfx r1, r3, #12, #1
890 ; CHECK-NEXT: rsbs r1, r1, #0
891 ; CHECK-NEXT: bfi lr, r1, #3, #1
892 ; CHECK-NEXT: lsls.w r1, lr, #31
893 ; CHECK-NEXT: beq .LBB18_2
894 ; CHECK-NEXT: @ %bb.1: @ %cond.load
895 ; CHECK-NEXT: vldr.16 s4, [r2]
896 ; CHECK-NEXT: .LBB18_2: @ %else
897 ; CHECK-NEXT: lsls.w r1, lr, #30
898 ; CHECK-NEXT: bpl .LBB18_6
899 ; CHECK-NEXT: @ %bb.3: @ %cond.load1
900 ; CHECK-NEXT: vldr.16 s0, [r2, #2]
901 ; CHECK-NEXT: vmov r3, s4
902 ; CHECK-NEXT: vmovx.f16 s4, s5
903 ; CHECK-NEXT: vmov r1, s0
904 ; CHECK-NEXT: vmov.16 q0[0], r3
905 ; CHECK-NEXT: vmov.16 q0[1], r1
906 ; CHECK-NEXT: vmov r1, s5
907 ; CHECK-NEXT: vmov.16 q0[2], r1
908 ; CHECK-NEXT: vmov r1, s4
909 ; CHECK-NEXT: vmov.16 q0[3], r1
910 ; CHECK-NEXT: lsls.w r1, lr, #29
911 ; CHECK-NEXT: bmi .LBB18_7
912 ; CHECK-NEXT: .LBB18_4:
913 ; CHECK-NEXT: vmov q2, q0
914 ; CHECK-NEXT: lsls.w r1, lr, #28
915 ; CHECK-NEXT: bmi .LBB18_8
916 ; CHECK-NEXT: .LBB18_5:
917 ; CHECK-NEXT: vmov q1, q2
918 ; CHECK-NEXT: b .LBB18_9
919 ; CHECK-NEXT: .LBB18_6:
920 ; CHECK-NEXT: vmov q0, q1
921 ; CHECK-NEXT: lsls.w r1, lr, #29
922 ; CHECK-NEXT: bpl .LBB18_4
923 ; CHECK-NEXT: .LBB18_7: @ %cond.load4
924 ; CHECK-NEXT: vmovx.f16 s4, s0
925 ; CHECK-NEXT: vmov r1, s0
926 ; CHECK-NEXT: vmov r3, s4
927 ; CHECK-NEXT: vldr.16 s4, [r2, #4]
928 ; CHECK-NEXT: vmov.16 q2[0], r1
929 ; CHECK-NEXT: vmovx.f16 s0, s1
930 ; CHECK-NEXT: vmov.16 q2[1], r3
931 ; CHECK-NEXT: vmov r1, s4
932 ; CHECK-NEXT: vmov.16 q2[2], r1
933 ; CHECK-NEXT: vmov r1, s0
934 ; CHECK-NEXT: vmov.16 q2[3], r1
935 ; CHECK-NEXT: lsls.w r1, lr, #28
936 ; CHECK-NEXT: bpl .LBB18_5
937 ; CHECK-NEXT: .LBB18_8: @ %cond.load7
938 ; CHECK-NEXT: vmovx.f16 s0, s8
939 ; CHECK-NEXT: vmov r3, s8
940 ; CHECK-NEXT: vmov r1, s0
941 ; CHECK-NEXT: vmov.16 q1[0], r3
942 ; CHECK-NEXT: vldr.16 s0, [r2, #6]
943 ; CHECK-NEXT: vmov.16 q1[1], r1
944 ; CHECK-NEXT: vmov r1, s9
945 ; CHECK-NEXT: vmov.16 q1[2], r1
946 ; CHECK-NEXT: vmov r1, s0
947 ; CHECK-NEXT: vmov.16 q1[3], r1
948 ; CHECK-NEXT: .LBB18_9: @ %else8
949 ; CHECK-NEXT: vmrs r2, p0
950 ; CHECK-NEXT: vmovx.f16 s0, s5
951 ; CHECK-NEXT: vcvtb.f32.f16 s3, s0
952 ; CHECK-NEXT: vmovx.f16 s8, s4
953 ; CHECK-NEXT: vcvtb.f32.f16 s2, s5
954 ; CHECK-NEXT: movs r1, #0
955 ; CHECK-NEXT: vcvtb.f32.f16 s1, s8
956 ; CHECK-NEXT: vcvtb.f32.f16 s0, s4
957 ; CHECK-NEXT: and r3, r2, #1
958 ; CHECK-NEXT: rsbs r3, r3, #0
959 ; CHECK-NEXT: bfi r1, r3, #0, #1
960 ; CHECK-NEXT: ubfx r3, r2, #4, #1
961 ; CHECK-NEXT: rsbs r3, r3, #0
962 ; CHECK-NEXT: bfi r1, r3, #1, #1
963 ; CHECK-NEXT: ubfx r3, r2, #8, #1
964 ; CHECK-NEXT: ubfx r2, r2, #12, #1
965 ; CHECK-NEXT: rsbs r3, r3, #0
966 ; CHECK-NEXT: bfi r1, r3, #2, #1
967 ; CHECK-NEXT: rsbs r2, r2, #0
968 ; CHECK-NEXT: bfi r1, r2, #3, #1
969 ; CHECK-NEXT: lsls r2, r1, #31
970 ; CHECK-NEXT: ittt ne
971 ; CHECK-NEXT: vstrne s0, [sp, #12]
972 ; CHECK-NEXT: ldrne r2, [sp, #12]
973 ; CHECK-NEXT: strne r2, [r0]
974 ; CHECK-NEXT: lsls r2, r1, #30
975 ; CHECK-NEXT: ittt mi
976 ; CHECK-NEXT: vstrmi s1, [sp, #8]
977 ; CHECK-NEXT: ldrmi r2, [sp, #8]
978 ; CHECK-NEXT: strmi r2, [r0, #4]
979 ; CHECK-NEXT: lsls r2, r1, #29
980 ; CHECK-NEXT: ittt mi
981 ; CHECK-NEXT: vstrmi s2, [sp, #4]
982 ; CHECK-NEXT: ldrmi r2, [sp, #4]
983 ; CHECK-NEXT: strmi r2, [r0, #8]
984 ; CHECK-NEXT: lsls r1, r1, #28
985 ; CHECK-NEXT: ittt mi
986 ; CHECK-NEXT: vstrmi s3, [sp]
987 ; CHECK-NEXT: ldrmi r1, [sp]
988 ; CHECK-NEXT: strmi r1, [r0, #12]
989 ; CHECK-NEXT: add sp, #24
990 ; CHECK-NEXT: pop {r7, pc}
; IR under test: mask-driven extending load (half -> float) + masked store.
992 %0 = load <4 x i16>, <4 x i16>* %mask, align 2
993 %1 = icmp sgt <4 x i16> %0, zeroinitializer
994 %2 = call <4 x half> @llvm.masked.load.v4f16(<4 x half>* %src, i32 2, <4 x i1> %1, <4 x half> undef)
995 %3 = fpext <4 x half> %2 to <4 x float>
996 call void @llvm.masked.store.v4f32(<4 x float> %3, <4 x float>* %dest, i32 2, <4 x i1> %1)
; Same extending v4f16 -> v4f32 pattern as foo_v4f32_v4f16, but the final
; masked store uses alignment 1. The expected assembly is the same scalarized
; expansion (labels .LBB19_*): since the lowering already goes lane-by-lane
; through GPRs and the stack, the reduced alignment changes nothing here.
1000 define void @foo_v4f32_v4f16_unaligned(<4 x float> *%dest, <4 x i16> *%mask, <4 x half> *%src) {
1001 ; CHECK-LABEL: foo_v4f32_v4f16_unaligned:
1002 ; CHECK: @ %bb.0: @ %entry
1003 ; CHECK-NEXT: .save {r7, lr}
1004 ; CHECK-NEXT: push {r7, lr}
1005 ; CHECK-NEXT: .pad #24
1006 ; CHECK-NEXT: sub sp, #24
1007 ; CHECK-NEXT: vldrh.s32 q0, [r1]
1008 ; CHECK-NEXT: mov.w lr, #0
1009 ; CHECK-NEXT: @ implicit-def: $q1
1010 ; CHECK-NEXT: vcmp.s32 gt, q0, zr
1011 ; CHECK-NEXT: vmrs r3, p0
1012 ; CHECK-NEXT: and r1, r3, #1
1013 ; CHECK-NEXT: rsb.w r12, r1, #0
1014 ; CHECK-NEXT: ubfx r1, r3, #4, #1
1015 ; CHECK-NEXT: bfi lr, r12, #0, #1
1016 ; CHECK-NEXT: rsbs r1, r1, #0
1017 ; CHECK-NEXT: bfi lr, r1, #1, #1
1018 ; CHECK-NEXT: ubfx r1, r3, #8, #1
1019 ; CHECK-NEXT: rsbs r1, r1, #0
1020 ; CHECK-NEXT: bfi lr, r1, #2, #1
1021 ; CHECK-NEXT: ubfx r1, r3, #12, #1
1022 ; CHECK-NEXT: rsbs r1, r1, #0
1023 ; CHECK-NEXT: bfi lr, r1, #3, #1
1024 ; CHECK-NEXT: lsls.w r1, lr, #31
1025 ; CHECK-NEXT: beq .LBB19_2
1026 ; CHECK-NEXT: @ %bb.1: @ %cond.load
1027 ; CHECK-NEXT: vldr.16 s4, [r2]
1028 ; CHECK-NEXT: .LBB19_2: @ %else
1029 ; CHECK-NEXT: lsls.w r1, lr, #30
1030 ; CHECK-NEXT: bpl .LBB19_6
1031 ; CHECK-NEXT: @ %bb.3: @ %cond.load1
1032 ; CHECK-NEXT: vldr.16 s0, [r2, #2]
1033 ; CHECK-NEXT: vmov r3, s4
1034 ; CHECK-NEXT: vmovx.f16 s4, s5
1035 ; CHECK-NEXT: vmov r1, s0
1036 ; CHECK-NEXT: vmov.16 q0[0], r3
1037 ; CHECK-NEXT: vmov.16 q0[1], r1
1038 ; CHECK-NEXT: vmov r1, s5
1039 ; CHECK-NEXT: vmov.16 q0[2], r1
1040 ; CHECK-NEXT: vmov r1, s4
1041 ; CHECK-NEXT: vmov.16 q0[3], r1
1042 ; CHECK-NEXT: lsls.w r1, lr, #29
1043 ; CHECK-NEXT: bmi .LBB19_7
1044 ; CHECK-NEXT: .LBB19_4:
1045 ; CHECK-NEXT: vmov q2, q0
1046 ; CHECK-NEXT: lsls.w r1, lr, #28
1047 ; CHECK-NEXT: bmi .LBB19_8
1048 ; CHECK-NEXT: .LBB19_5:
1049 ; CHECK-NEXT: vmov q1, q2
1050 ; CHECK-NEXT: b .LBB19_9
1051 ; CHECK-NEXT: .LBB19_6:
1052 ; CHECK-NEXT: vmov q0, q1
1053 ; CHECK-NEXT: lsls.w r1, lr, #29
1054 ; CHECK-NEXT: bpl .LBB19_4
1055 ; CHECK-NEXT: .LBB19_7: @ %cond.load4
1056 ; CHECK-NEXT: vmovx.f16 s4, s0
1057 ; CHECK-NEXT: vmov r1, s0
1058 ; CHECK-NEXT: vmov r3, s4
1059 ; CHECK-NEXT: vldr.16 s4, [r2, #4]
1060 ; CHECK-NEXT: vmov.16 q2[0], r1
1061 ; CHECK-NEXT: vmovx.f16 s0, s1
1062 ; CHECK-NEXT: vmov.16 q2[1], r3
1063 ; CHECK-NEXT: vmov r1, s4
1064 ; CHECK-NEXT: vmov.16 q2[2], r1
1065 ; CHECK-NEXT: vmov r1, s0
1066 ; CHECK-NEXT: vmov.16 q2[3], r1
1067 ; CHECK-NEXT: lsls.w r1, lr, #28
1068 ; CHECK-NEXT: bpl .LBB19_5
1069 ; CHECK-NEXT: .LBB19_8: @ %cond.load7
1070 ; CHECK-NEXT: vmovx.f16 s0, s8
1071 ; CHECK-NEXT: vmov r3, s8
1072 ; CHECK-NEXT: vmov r1, s0
1073 ; CHECK-NEXT: vmov.16 q1[0], r3
1074 ; CHECK-NEXT: vldr.16 s0, [r2, #6]
1075 ; CHECK-NEXT: vmov.16 q1[1], r1
1076 ; CHECK-NEXT: vmov r1, s9
1077 ; CHECK-NEXT: vmov.16 q1[2], r1
1078 ; CHECK-NEXT: vmov r1, s0
1079 ; CHECK-NEXT: vmov.16 q1[3], r1
1080 ; CHECK-NEXT: .LBB19_9: @ %else8
1081 ; CHECK-NEXT: vmrs r2, p0
1082 ; CHECK-NEXT: vmovx.f16 s0, s5
1083 ; CHECK-NEXT: vcvtb.f32.f16 s3, s0
1084 ; CHECK-NEXT: vmovx.f16 s8, s4
1085 ; CHECK-NEXT: vcvtb.f32.f16 s2, s5
1086 ; CHECK-NEXT: movs r1, #0
1087 ; CHECK-NEXT: vcvtb.f32.f16 s1, s8
1088 ; CHECK-NEXT: vcvtb.f32.f16 s0, s4
1089 ; CHECK-NEXT: and r3, r2, #1
1090 ; CHECK-NEXT: rsbs r3, r3, #0
1091 ; CHECK-NEXT: bfi r1, r3, #0, #1
1092 ; CHECK-NEXT: ubfx r3, r2, #4, #1
1093 ; CHECK-NEXT: rsbs r3, r3, #0
1094 ; CHECK-NEXT: bfi r1, r3, #1, #1
1095 ; CHECK-NEXT: ubfx r3, r2, #8, #1
1096 ; CHECK-NEXT: ubfx r2, r2, #12, #1
1097 ; CHECK-NEXT: rsbs r3, r3, #0
1098 ; CHECK-NEXT: bfi r1, r3, #2, #1
1099 ; CHECK-NEXT: rsbs r2, r2, #0
1100 ; CHECK-NEXT: bfi r1, r2, #3, #1
1101 ; CHECK-NEXT: lsls r2, r1, #31
1102 ; CHECK-NEXT: ittt ne
1103 ; CHECK-NEXT: vstrne s0, [sp, #12]
1104 ; CHECK-NEXT: ldrne r2, [sp, #12]
1105 ; CHECK-NEXT: strne r2, [r0]
1106 ; CHECK-NEXT: lsls r2, r1, #30
1107 ; CHECK-NEXT: ittt mi
1108 ; CHECK-NEXT: vstrmi s1, [sp, #8]
1109 ; CHECK-NEXT: ldrmi r2, [sp, #8]
1110 ; CHECK-NEXT: strmi r2, [r0, #4]
1111 ; CHECK-NEXT: lsls r2, r1, #29
1112 ; CHECK-NEXT: ittt mi
1113 ; CHECK-NEXT: vstrmi s2, [sp, #4]
1114 ; CHECK-NEXT: ldrmi r2, [sp, #4]
1115 ; CHECK-NEXT: strmi r2, [r0, #8]
1116 ; CHECK-NEXT: lsls r1, r1, #28
1117 ; CHECK-NEXT: ittt mi
1118 ; CHECK-NEXT: vstrmi s3, [sp]
1119 ; CHECK-NEXT: ldrmi r1, [sp]
1120 ; CHECK-NEXT: strmi r1, [r0, #12]
1121 ; CHECK-NEXT: add sp, #24
1122 ; CHECK-NEXT: pop {r7, pc}
; IR under test: identical to foo_v4f32_v4f16 except the store alignment is 1.
1124 %0 = load <4 x i16>, <4 x i16>* %mask, align 2
1125 %1 = icmp sgt <4 x i16> %0, zeroinitializer
1126 %2 = call <4 x half> @llvm.masked.load.v4f16(<4 x half>* %src, i32 2, <4 x i1> %1, <4 x half> undef)
1127 %3 = fpext <4 x half> %2 to <4 x float>
1128 call void @llvm.masked.store.v4f32(<4 x float> %3, <4 x float>* %dest, i32 1, <4 x i1> %1)
; Declarations of the llvm.masked.load / llvm.masked.store intrinsics used by
; the tests in this file, one per element type and vector width.
1132 declare void @llvm.masked.store.v4i32(<4 x i32>, <4 x i32>*, i32, <4 x i1>)
1133 declare void @llvm.masked.store.v8i16(<8 x i16>, <8 x i16>*, i32, <8 x i1>)
1134 declare void @llvm.masked.store.v16i8(<16 x i8>, <16 x i8>*, i32, <16 x i1>)
1135 declare void @llvm.masked.store.v8f16(<8 x half>, <8 x half>*, i32, <8 x i1>)
1136 declare void @llvm.masked.store.v4f32(<4 x float>, <4 x float>*, i32, <4 x i1>)
1137 declare <16 x i8> @llvm.masked.load.v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)
1138 declare <8 x i16> @llvm.masked.load.v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>)
1139 declare <2 x i32> @llvm.masked.load.v2i32(<2 x i32>*, i32, <2 x i1>, <2 x i32>)
1140 declare <4 x i32> @llvm.masked.load.v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
1141 declare <4 x float> @llvm.masked.load.v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)
1142 declare <4 x half> @llvm.masked.load.v4f16(<4 x half>*, i32, <4 x i1>, <4 x half>)
1143 declare <8 x half> @llvm.masked.load.v8f16(<8 x half>*, i32, <8 x i1>, <8 x half>)
; Narrow-vector variants used by the truncating/extending tests.
1145 declare void @llvm.masked.store.v8i8(<8 x i8>, <8 x i8>*, i32, <8 x i1>)
1146 declare void @llvm.masked.store.v4i8(<4 x i8>, <4 x i8>*, i32, <4 x i1>)
1147 declare void @llvm.masked.store.v4i16(<4 x i16>, <4 x i16>*, i32, <4 x i1>)
1148 declare void @llvm.masked.store.v2i64(<2 x i64>, <2 x i64>*, i32, <2 x i1>)
1149 declare <4 x i16> @llvm.masked.load.v4i16(<4 x i16>*, i32, <4 x i1>, <4 x i16>)
1150 declare <4 x i8> @llvm.masked.load.v4i8(<4 x i8>*, i32, <4 x i1>, <4 x i8>)
1151 declare <8 x i8> @llvm.masked.load.v8i8(<8 x i8>*, i32, <8 x i1>, <8 x i8>)