1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=thumbv8.1m.main-arm-none-eabi -mattr=+mve,+fullfp16 -enable-arm-maskedldst -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-LE
3 ; RUN: llc -mtriple=thumbebv8.1m.main-arm-none-eabi -mattr=+mve,+fullfp16 -enable-arm-maskedldst -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK --check-prefix=CHECK-BE
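; The tests below check lowering of llvm.masked.load intrinsics for MVE vector types
; (v4i32, v8i16, v16i8, v4f32 and v8f16) on both little-endian and big-endian targets,
; covering zero, undef and non-trivial passthru operands and a range of alignments.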
5 define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align4_zero(<4 x i32> *%dest, <4 x i32> %a) {
6 ; CHECK-LE-LABEL: masked_v4i32_align4_zero:
7 ; CHECK-LE: @ %bb.0: @ %entry
8 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
9 ; CHECK-LE-NEXT: vldrwt.u32 q0, [r0]
10 ; CHECK-LE-NEXT: bx lr
12 ; CHECK-BE-LABEL: masked_v4i32_align4_zero:
13 ; CHECK-BE: @ %bb.0: @ %entry
14 ; CHECK-BE-NEXT: vrev64.32 q1, q0
15 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
16 ; CHECK-BE-NEXT: vldrwt.u32 q1, [r0]
17 ; CHECK-BE-NEXT: vrev64.32 q0, q1
18 ; CHECK-BE-NEXT: bx lr
19 entry:
20 %c = icmp sgt <4 x i32> %a, zeroinitializer
21 %l = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %dest, i32 4, <4 x i1> %c, <4 x i32> zeroinitializer)
22 ret <4 x i32> %l
23 }
25 define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align4_undef(<4 x i32> *%dest, <4 x i32> %a) {
26 ; CHECK-LE-LABEL: masked_v4i32_align4_undef:
27 ; CHECK-LE: @ %bb.0: @ %entry
28 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
29 ; CHECK-LE-NEXT: vldrwt.u32 q0, [r0]
30 ; CHECK-LE-NEXT: bx lr
32 ; CHECK-BE-LABEL: masked_v4i32_align4_undef:
33 ; CHECK-BE: @ %bb.0: @ %entry
34 ; CHECK-BE-NEXT: vrev64.32 q1, q0
35 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
36 ; CHECK-BE-NEXT: vldrwt.u32 q1, [r0]
37 ; CHECK-BE-NEXT: vrev64.32 q0, q1
38 ; CHECK-BE-NEXT: bx lr
39 entry:
40 %c = icmp sgt <4 x i32> %a, zeroinitializer
41 %l = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %dest, i32 4, <4 x i1> %c, <4 x i32> undef)
42 ret <4 x i32> %l
43 }
45 define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align1_undef(<4 x i32> *%dest, <4 x i32> %a) {
46 ; CHECK-LE-LABEL: masked_v4i32_align1_undef:
47 ; CHECK-LE: @ %bb.0: @ %entry
48 ; CHECK-LE-NEXT: .pad #4
49 ; CHECK-LE-NEXT: sub sp, #4
50 ; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr
51 ; CHECK-LE-NEXT: @ implicit-def: $q0
52 ; CHECK-LE-NEXT: vmrs r2, p0
53 ; CHECK-LE-NEXT: and r1, r2, #1
54 ; CHECK-LE-NEXT: rsbs r3, r1, #0
55 ; CHECK-LE-NEXT: movs r1, #0
56 ; CHECK-LE-NEXT: bfi r1, r3, #0, #1
57 ; CHECK-LE-NEXT: ubfx r3, r2, #4, #1
58 ; CHECK-LE-NEXT: rsbs r3, r3, #0
59 ; CHECK-LE-NEXT: bfi r1, r3, #1, #1
60 ; CHECK-LE-NEXT: ubfx r3, r2, #8, #1
61 ; CHECK-LE-NEXT: ubfx r2, r2, #12, #1
62 ; CHECK-LE-NEXT: rsbs r3, r3, #0
63 ; CHECK-LE-NEXT: bfi r1, r3, #2, #1
64 ; CHECK-LE-NEXT: rsbs r2, r2, #0
65 ; CHECK-LE-NEXT: bfi r1, r2, #3, #1
66 ; CHECK-LE-NEXT: lsls r2, r1, #31
67 ; CHECK-LE-NEXT: itt ne
68 ; CHECK-LE-NEXT: ldrne r2, [r0]
69 ; CHECK-LE-NEXT: vmovne.32 q0[0], r2
70 ; CHECK-LE-NEXT: lsls r2, r1, #30
71 ; CHECK-LE-NEXT: itt mi
72 ; CHECK-LE-NEXT: ldrmi r2, [r0, #4]
73 ; CHECK-LE-NEXT: vmovmi.32 q0[1], r2
74 ; CHECK-LE-NEXT: lsls r2, r1, #29
75 ; CHECK-LE-NEXT: itt mi
76 ; CHECK-LE-NEXT: ldrmi r2, [r0, #8]
77 ; CHECK-LE-NEXT: vmovmi.32 q0[2], r2
78 ; CHECK-LE-NEXT: lsls r1, r1, #28
79 ; CHECK-LE-NEXT: itt mi
80 ; CHECK-LE-NEXT: ldrmi r0, [r0, #12]
81 ; CHECK-LE-NEXT: vmovmi.32 q0[3], r0
82 ; CHECK-LE-NEXT: add sp, #4
83 ; CHECK-LE-NEXT: bx lr
85 ; CHECK-BE-LABEL: masked_v4i32_align1_undef:
86 ; CHECK-BE: @ %bb.0: @ %entry
87 ; CHECK-BE-NEXT: .pad #4
88 ; CHECK-BE-NEXT: sub sp, #4
89 ; CHECK-BE-NEXT: vrev64.32 q1, q0
90 ; CHECK-BE-NEXT: vcmp.s32 gt, q1, zr
91 ; CHECK-BE-NEXT: @ implicit-def: $q1
92 ; CHECK-BE-NEXT: vmrs r2, p0
93 ; CHECK-BE-NEXT: and r1, r2, #1
94 ; CHECK-BE-NEXT: rsbs r3, r1, #0
95 ; CHECK-BE-NEXT: movs r1, #0
96 ; CHECK-BE-NEXT: bfi r1, r3, #0, #1
97 ; CHECK-BE-NEXT: ubfx r3, r2, #4, #1
98 ; CHECK-BE-NEXT: rsbs r3, r3, #0
99 ; CHECK-BE-NEXT: bfi r1, r3, #1, #1
100 ; CHECK-BE-NEXT: ubfx r3, r2, #8, #1
101 ; CHECK-BE-NEXT: ubfx r2, r2, #12, #1
102 ; CHECK-BE-NEXT: rsbs r3, r3, #0
103 ; CHECK-BE-NEXT: bfi r1, r3, #2, #1
104 ; CHECK-BE-NEXT: rsbs r2, r2, #0
105 ; CHECK-BE-NEXT: bfi r1, r2, #3, #1
106 ; CHECK-BE-NEXT: lsls r2, r1, #31
107 ; CHECK-BE-NEXT: itt ne
108 ; CHECK-BE-NEXT: ldrne r2, [r0]
109 ; CHECK-BE-NEXT: vmovne.32 q1[0], r2
110 ; CHECK-BE-NEXT: lsls r2, r1, #30
111 ; CHECK-BE-NEXT: itt mi
112 ; CHECK-BE-NEXT: ldrmi r2, [r0, #4]
113 ; CHECK-BE-NEXT: vmovmi.32 q1[1], r2
114 ; CHECK-BE-NEXT: lsls r2, r1, #29
115 ; CHECK-BE-NEXT: itt mi
116 ; CHECK-BE-NEXT: ldrmi r2, [r0, #8]
117 ; CHECK-BE-NEXT: vmovmi.32 q1[2], r2
118 ; CHECK-BE-NEXT: lsls r1, r1, #28
119 ; CHECK-BE-NEXT: itt mi
120 ; CHECK-BE-NEXT: ldrmi r0, [r0, #12]
121 ; CHECK-BE-NEXT: vmovmi.32 q1[3], r0
122 ; CHECK-BE-NEXT: vrev64.32 q0, q1
123 ; CHECK-BE-NEXT: add sp, #4
124 ; CHECK-BE-NEXT: bx lr
125 entry:
126 %c = icmp sgt <4 x i32> %a, zeroinitializer
127 %l = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %dest, i32 1, <4 x i1> %c, <4 x i32> undef)
128 ret <4 x i32> %l
129 }
131 define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align4_other(<4 x i32> *%dest, <4 x i32> %a) {
132 ; CHECK-LE-LABEL: masked_v4i32_align4_other:
133 ; CHECK-LE: @ %bb.0: @ %entry
134 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
135 ; CHECK-LE-NEXT: vldrwt.u32 q1, [r0]
136 ; CHECK-LE-NEXT: vpsel q0, q1, q0
137 ; CHECK-LE-NEXT: bx lr
139 ; CHECK-BE-LABEL: masked_v4i32_align4_other:
140 ; CHECK-BE: @ %bb.0: @ %entry
141 ; CHECK-BE-NEXT: vrev64.32 q1, q0
142 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
143 ; CHECK-BE-NEXT: vldrwt.u32 q0, [r0]
144 ; CHECK-BE-NEXT: vpsel q1, q0, q1
145 ; CHECK-BE-NEXT: vrev64.32 q0, q1
146 ; CHECK-BE-NEXT: bx lr
147 entry:
148 %c = icmp sgt <4 x i32> %a, zeroinitializer
149 %l = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %dest, i32 4, <4 x i1> %c, <4 x i32> %a)
150 ret <4 x i32> %l
151 }
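; Extending masked loads: a zext/sext of a masked v4i16 or v4i8 load to v4i32 is expected
; to fold into a single predicated widening load (vldrht.u32/.s32, vldrbt.u32/.s32) when
; the alignment allows it; under-aligned cases are expanded into conditional scalar loads.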
153 define arm_aapcs_vfpcc <4 x i32> @zext16_masked_v4i32_align2_zero(<4 x i16> *%dest, <4 x i32> %a) {
154 ; CHECK-LE-LABEL: zext16_masked_v4i32_align2_zero:
155 ; CHECK-LE: @ %bb.0: @ %entry
156 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
157 ; CHECK-LE-NEXT: vldrht.u32 q0, [r0]
158 ; CHECK-LE-NEXT: bx lr
160 ; CHECK-BE-LABEL: zext16_masked_v4i32_align2_zero:
161 ; CHECK-BE: @ %bb.0: @ %entry
162 ; CHECK-BE-NEXT: vrev64.32 q1, q0
163 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
164 ; CHECK-BE-NEXT: vldrht.u32 q1, [r0]
165 ; CHECK-BE-NEXT: vrev64.32 q0, q1
166 ; CHECK-BE-NEXT: bx lr
167 entry:
168 %c = icmp sgt <4 x i32> %a, zeroinitializer
169 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> zeroinitializer)
170 %ext = zext <4 x i16> %l to <4 x i32>
171 ret <4 x i32> %ext
172 }
174 define arm_aapcs_vfpcc <4 x i32> @zext16_masked_v4i32_align2_undef(<4 x i16> *%dest, <4 x i32> %a) {
175 ; CHECK-LE-LABEL: zext16_masked_v4i32_align2_undef:
176 ; CHECK-LE: @ %bb.0: @ %entry
177 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
178 ; CHECK-LE-NEXT: vldrht.u32 q0, [r0]
179 ; CHECK-LE-NEXT: bx lr
181 ; CHECK-BE-LABEL: zext16_masked_v4i32_align2_undef:
182 ; CHECK-BE: @ %bb.0: @ %entry
183 ; CHECK-BE-NEXT: vrev64.32 q1, q0
184 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
185 ; CHECK-BE-NEXT: vldrht.u32 q1, [r0]
186 ; CHECK-BE-NEXT: vrev64.32 q0, q1
187 ; CHECK-BE-NEXT: bx lr
188 entry:
189 %c = icmp sgt <4 x i32> %a, zeroinitializer
190 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> undef)
191 %ext = zext <4 x i16> %l to <4 x i32>
192 ret <4 x i32> %ext
193 }
195 define arm_aapcs_vfpcc <4 x i32> @zext16_masked_v4i32_align1_undef(<4 x i16> *%dest, <4 x i32> %a) {
196 ; CHECK-LE-LABEL: zext16_masked_v4i32_align1_undef:
197 ; CHECK-LE: @ %bb.0: @ %entry
198 ; CHECK-LE-NEXT: .pad #4
199 ; CHECK-LE-NEXT: sub sp, #4
200 ; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr
201 ; CHECK-LE-NEXT: @ implicit-def: $q0
202 ; CHECK-LE-NEXT: vmrs r2, p0
203 ; CHECK-LE-NEXT: and r1, r2, #1
204 ; CHECK-LE-NEXT: rsbs r3, r1, #0
205 ; CHECK-LE-NEXT: movs r1, #0
206 ; CHECK-LE-NEXT: bfi r1, r3, #0, #1
207 ; CHECK-LE-NEXT: ubfx r3, r2, #4, #1
208 ; CHECK-LE-NEXT: rsbs r3, r3, #0
209 ; CHECK-LE-NEXT: bfi r1, r3, #1, #1
210 ; CHECK-LE-NEXT: ubfx r3, r2, #8, #1
211 ; CHECK-LE-NEXT: ubfx r2, r2, #12, #1
212 ; CHECK-LE-NEXT: rsbs r3, r3, #0
213 ; CHECK-LE-NEXT: bfi r1, r3, #2, #1
214 ; CHECK-LE-NEXT: rsbs r2, r2, #0
215 ; CHECK-LE-NEXT: bfi r1, r2, #3, #1
216 ; CHECK-LE-NEXT: lsls r2, r1, #31
217 ; CHECK-LE-NEXT: itt ne
218 ; CHECK-LE-NEXT: ldrhne r2, [r0]
219 ; CHECK-LE-NEXT: vmovne.32 q0[0], r2
220 ; CHECK-LE-NEXT: lsls r2, r1, #30
221 ; CHECK-LE-NEXT: itt mi
222 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #2]
223 ; CHECK-LE-NEXT: vmovmi.32 q0[1], r2
224 ; CHECK-LE-NEXT: lsls r2, r1, #29
225 ; CHECK-LE-NEXT: itt mi
226 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #4]
227 ; CHECK-LE-NEXT: vmovmi.32 q0[2], r2
228 ; CHECK-LE-NEXT: lsls r1, r1, #28
229 ; CHECK-LE-NEXT: itt mi
230 ; CHECK-LE-NEXT: ldrhmi r0, [r0, #6]
231 ; CHECK-LE-NEXT: vmovmi.32 q0[3], r0
232 ; CHECK-LE-NEXT: vmovlb.s16 q0, q0
233 ; CHECK-LE-NEXT: add sp, #4
234 ; CHECK-LE-NEXT: bx lr
236 ; CHECK-BE-LABEL: zext16_masked_v4i32_align1_undef:
237 ; CHECK-BE: @ %bb.0: @ %entry
238 ; CHECK-BE-NEXT: .pad #4
239 ; CHECK-BE-NEXT: sub sp, #4
240 ; CHECK-BE-NEXT: vrev64.32 q1, q0
241 ; CHECK-BE-NEXT: @ implicit-def: $q0
242 ; CHECK-BE-NEXT: vcmp.s32 gt, q1, zr
243 ; CHECK-BE-NEXT: vmrs r2, p0
244 ; CHECK-BE-NEXT: and r1, r2, #1
245 ; CHECK-BE-NEXT: rsbs r3, r1, #0
246 ; CHECK-BE-NEXT: movs r1, #0
247 ; CHECK-BE-NEXT: bfi r1, r3, #0, #1
248 ; CHECK-BE-NEXT: ubfx r3, r2, #4, #1
249 ; CHECK-BE-NEXT: rsbs r3, r3, #0
250 ; CHECK-BE-NEXT: bfi r1, r3, #1, #1
251 ; CHECK-BE-NEXT: ubfx r3, r2, #8, #1
252 ; CHECK-BE-NEXT: ubfx r2, r2, #12, #1
253 ; CHECK-BE-NEXT: rsbs r3, r3, #0
254 ; CHECK-BE-NEXT: bfi r1, r3, #2, #1
255 ; CHECK-BE-NEXT: rsbs r2, r2, #0
256 ; CHECK-BE-NEXT: bfi r1, r2, #3, #1
257 ; CHECK-BE-NEXT: lsls r2, r1, #31
258 ; CHECK-BE-NEXT: itt ne
259 ; CHECK-BE-NEXT: ldrhne r2, [r0]
260 ; CHECK-BE-NEXT: vmovne.32 q0[0], r2
261 ; CHECK-BE-NEXT: lsls r2, r1, #30
262 ; CHECK-BE-NEXT: itt mi
263 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #2]
264 ; CHECK-BE-NEXT: vmovmi.32 q0[1], r2
265 ; CHECK-BE-NEXT: lsls r2, r1, #29
266 ; CHECK-BE-NEXT: itt mi
267 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #4]
268 ; CHECK-BE-NEXT: vmovmi.32 q0[2], r2
269 ; CHECK-BE-NEXT: lsls r1, r1, #28
270 ; CHECK-BE-NEXT: itt mi
271 ; CHECK-BE-NEXT: ldrhmi r0, [r0, #6]
272 ; CHECK-BE-NEXT: vmovmi.32 q0[3], r0
273 ; CHECK-BE-NEXT: vmovlb.s16 q1, q0
274 ; CHECK-BE-NEXT: vrev64.32 q0, q1
275 ; CHECK-BE-NEXT: add sp, #4
276 ; CHECK-BE-NEXT: bx lr
277 entry:
278 %c = icmp sgt <4 x i32> %a, zeroinitializer
279 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 1, <4 x i1> %c, <4 x i16> undef)
280 %ext = sext <4 x i16> %l to <4 x i32>
281 ret <4 x i32> %ext
282 }
284 define arm_aapcs_vfpcc <4 x i32> @zext16_masked_v4i32_align2_other(<4 x i16> *%dest, <4 x i16> %a) {
285 ; CHECK-LE-LABEL: zext16_masked_v4i32_align2_other:
286 ; CHECK-LE: @ %bb.0: @ %entry
287 ; CHECK-LE-NEXT: vmovlb.u16 q1, q0
288 ; CHECK-LE-NEXT: vmovlb.s16 q0, q0
289 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
290 ; CHECK-LE-NEXT: vldrht.u32 q0, [r0]
291 ; CHECK-LE-NEXT: vpsel q0, q0, q1
292 ; CHECK-LE-NEXT: bx lr
294 ; CHECK-BE-LABEL: zext16_masked_v4i32_align2_other:
295 ; CHECK-BE: @ %bb.0: @ %entry
296 ; CHECK-BE-NEXT: vrev64.32 q1, q0
297 ; CHECK-BE-NEXT: vmovlb.u16 q0, q1
298 ; CHECK-BE-NEXT: vmovlb.s16 q1, q1
299 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
300 ; CHECK-BE-NEXT: vldrht.u32 q1, [r0]
301 ; CHECK-BE-NEXT: vpsel q1, q1, q0
302 ; CHECK-BE-NEXT: vrev64.32 q0, q1
303 ; CHECK-BE-NEXT: bx lr
304 entry:
305 %c = icmp sgt <4 x i16> %a, zeroinitializer
306 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> %a)
307 %ext = zext <4 x i16> %l to <4 x i32>
308 ret <4 x i32> %ext
309 }
311 define arm_aapcs_vfpcc <4 x i32> @sext16_masked_v4i32_align2_zero(<4 x i16> *%dest, <4 x i32> %a) {
312 ; CHECK-LE-LABEL: sext16_masked_v4i32_align2_zero:
313 ; CHECK-LE: @ %bb.0: @ %entry
314 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
315 ; CHECK-LE-NEXT: vldrht.s32 q0, [r0]
316 ; CHECK-LE-NEXT: bx lr
318 ; CHECK-BE-LABEL: sext16_masked_v4i32_align2_zero:
319 ; CHECK-BE: @ %bb.0: @ %entry
320 ; CHECK-BE-NEXT: vrev64.32 q1, q0
321 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
322 ; CHECK-BE-NEXT: vldrht.s32 q1, [r0]
323 ; CHECK-BE-NEXT: vrev64.32 q0, q1
324 ; CHECK-BE-NEXT: bx lr
325 entry:
326 %c = icmp sgt <4 x i32> %a, zeroinitializer
327 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> zeroinitializer)
328 %sext = sext <4 x i16> %l to <4 x i32>
329 ret <4 x i32> %sext
330 }
332 define arm_aapcs_vfpcc <4 x i32> @sext16_masked_v4i32_align2_undef(<4 x i16> *%dest, <4 x i32> %a) {
333 ; CHECK-LE-LABEL: sext16_masked_v4i32_align2_undef:
334 ; CHECK-LE: @ %bb.0: @ %entry
335 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
336 ; CHECK-LE-NEXT: vldrht.s32 q0, [r0]
337 ; CHECK-LE-NEXT: bx lr
339 ; CHECK-BE-LABEL: sext16_masked_v4i32_align2_undef:
340 ; CHECK-BE: @ %bb.0: @ %entry
341 ; CHECK-BE-NEXT: vrev64.32 q1, q0
342 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
343 ; CHECK-BE-NEXT: vldrht.s32 q1, [r0]
344 ; CHECK-BE-NEXT: vrev64.32 q0, q1
345 ; CHECK-BE-NEXT: bx lr
346 entry:
347 %c = icmp sgt <4 x i32> %a, zeroinitializer
348 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> undef)
349 %sext = sext <4 x i16> %l to <4 x i32>
350 ret <4 x i32> %sext
351 }
353 define arm_aapcs_vfpcc <4 x i32> @sext16_masked_v4i32_align1_undef(<4 x i16> *%dest, <4 x i32> %a) {
354 ; CHECK-LE-LABEL: sext16_masked_v4i32_align1_undef:
355 ; CHECK-LE: @ %bb.0: @ %entry
356 ; CHECK-LE-NEXT: .pad #4
357 ; CHECK-LE-NEXT: sub sp, #4
358 ; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr
359 ; CHECK-LE-NEXT: @ implicit-def: $q0
360 ; CHECK-LE-NEXT: vmrs r2, p0
361 ; CHECK-LE-NEXT: and r1, r2, #1
362 ; CHECK-LE-NEXT: rsbs r3, r1, #0
363 ; CHECK-LE-NEXT: movs r1, #0
364 ; CHECK-LE-NEXT: bfi r1, r3, #0, #1
365 ; CHECK-LE-NEXT: ubfx r3, r2, #4, #1
366 ; CHECK-LE-NEXT: rsbs r3, r3, #0
367 ; CHECK-LE-NEXT: bfi r1, r3, #1, #1
368 ; CHECK-LE-NEXT: ubfx r3, r2, #8, #1
369 ; CHECK-LE-NEXT: ubfx r2, r2, #12, #1
370 ; CHECK-LE-NEXT: rsbs r3, r3, #0
371 ; CHECK-LE-NEXT: bfi r1, r3, #2, #1
372 ; CHECK-LE-NEXT: rsbs r2, r2, #0
373 ; CHECK-LE-NEXT: bfi r1, r2, #3, #1
374 ; CHECK-LE-NEXT: lsls r2, r1, #31
375 ; CHECK-LE-NEXT: itt ne
376 ; CHECK-LE-NEXT: ldrhne r2, [r0]
377 ; CHECK-LE-NEXT: vmovne.32 q0[0], r2
378 ; CHECK-LE-NEXT: lsls r2, r1, #30
379 ; CHECK-LE-NEXT: itt mi
380 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #2]
381 ; CHECK-LE-NEXT: vmovmi.32 q0[1], r2
382 ; CHECK-LE-NEXT: lsls r2, r1, #29
383 ; CHECK-LE-NEXT: itt mi
384 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #4]
385 ; CHECK-LE-NEXT: vmovmi.32 q0[2], r2
386 ; CHECK-LE-NEXT: lsls r1, r1, #28
387 ; CHECK-LE-NEXT: itt mi
388 ; CHECK-LE-NEXT: ldrhmi r0, [r0, #6]
389 ; CHECK-LE-NEXT: vmovmi.32 q0[3], r0
390 ; CHECK-LE-NEXT: vmovlb.s16 q0, q0
391 ; CHECK-LE-NEXT: add sp, #4
392 ; CHECK-LE-NEXT: bx lr
394 ; CHECK-BE-LABEL: sext16_masked_v4i32_align1_undef:
395 ; CHECK-BE: @ %bb.0: @ %entry
396 ; CHECK-BE-NEXT: .pad #4
397 ; CHECK-BE-NEXT: sub sp, #4
398 ; CHECK-BE-NEXT: vrev64.32 q1, q0
399 ; CHECK-BE-NEXT: @ implicit-def: $q0
400 ; CHECK-BE-NEXT: vcmp.s32 gt, q1, zr
401 ; CHECK-BE-NEXT: vmrs r2, p0
402 ; CHECK-BE-NEXT: and r1, r2, #1
403 ; CHECK-BE-NEXT: rsbs r3, r1, #0
404 ; CHECK-BE-NEXT: movs r1, #0
405 ; CHECK-BE-NEXT: bfi r1, r3, #0, #1
406 ; CHECK-BE-NEXT: ubfx r3, r2, #4, #1
407 ; CHECK-BE-NEXT: rsbs r3, r3, #0
408 ; CHECK-BE-NEXT: bfi r1, r3, #1, #1
409 ; CHECK-BE-NEXT: ubfx r3, r2, #8, #1
410 ; CHECK-BE-NEXT: ubfx r2, r2, #12, #1
411 ; CHECK-BE-NEXT: rsbs r3, r3, #0
412 ; CHECK-BE-NEXT: bfi r1, r3, #2, #1
413 ; CHECK-BE-NEXT: rsbs r2, r2, #0
414 ; CHECK-BE-NEXT: bfi r1, r2, #3, #1
415 ; CHECK-BE-NEXT: lsls r2, r1, #31
416 ; CHECK-BE-NEXT: itt ne
417 ; CHECK-BE-NEXT: ldrhne r2, [r0]
418 ; CHECK-BE-NEXT: vmovne.32 q0[0], r2
419 ; CHECK-BE-NEXT: lsls r2, r1, #30
420 ; CHECK-BE-NEXT: itt mi
421 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #2]
422 ; CHECK-BE-NEXT: vmovmi.32 q0[1], r2
423 ; CHECK-BE-NEXT: lsls r2, r1, #29
424 ; CHECK-BE-NEXT: itt mi
425 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #4]
426 ; CHECK-BE-NEXT: vmovmi.32 q0[2], r2
427 ; CHECK-BE-NEXT: lsls r1, r1, #28
428 ; CHECK-BE-NEXT: itt mi
429 ; CHECK-BE-NEXT: ldrhmi r0, [r0, #6]
430 ; CHECK-BE-NEXT: vmovmi.32 q0[3], r0
431 ; CHECK-BE-NEXT: vmovlb.s16 q1, q0
432 ; CHECK-BE-NEXT: vrev64.32 q0, q1
433 ; CHECK-BE-NEXT: add sp, #4
434 ; CHECK-BE-NEXT: bx lr
435 entry:
436 %c = icmp sgt <4 x i32> %a, zeroinitializer
437 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 1, <4 x i1> %c, <4 x i16> undef)
438 %sext = sext <4 x i16> %l to <4 x i32>
439 ret <4 x i32> %sext
440 }
442 define arm_aapcs_vfpcc <4 x i32> @sext16_masked_v4i32_align2_other(<4 x i16> *%dest, <4 x i16> %a) {
443 ; CHECK-LE-LABEL: sext16_masked_v4i32_align2_other:
444 ; CHECK-LE: @ %bb.0: @ %entry
445 ; CHECK-LE-NEXT: vmovlb.s16 q0, q0
446 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
447 ; CHECK-LE-NEXT: vldrht.s32 q1, [r0]
448 ; CHECK-LE-NEXT: vpsel q0, q1, q0
449 ; CHECK-LE-NEXT: bx lr
451 ; CHECK-BE-LABEL: sext16_masked_v4i32_align2_other:
452 ; CHECK-BE: @ %bb.0: @ %entry
453 ; CHECK-BE-NEXT: vrev64.32 q1, q0
454 ; CHECK-BE-NEXT: vmovlb.s16 q0, q1
455 ; CHECK-BE-NEXT: vpt.s32 gt, q0, zr
456 ; CHECK-BE-NEXT: vldrht.s32 q1, [r0]
457 ; CHECK-BE-NEXT: vpsel q1, q1, q0
458 ; CHECK-BE-NEXT: vrev64.32 q0, q1
459 ; CHECK-BE-NEXT: bx lr
460 entry:
461 %c = icmp sgt <4 x i16> %a, zeroinitializer
462 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> %a)
463 %sext = sext <4 x i16> %l to <4 x i32>
464 ret <4 x i32> %sext
465 }
467 define arm_aapcs_vfpcc i8* @masked_v4i32_preinc(i8* %x, i8* %y, <4 x i32> %a) {
468 ; CHECK-LE-LABEL: masked_v4i32_preinc:
469 ; CHECK-LE: @ %bb.0: @ %entry
470 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
471 ; CHECK-LE-NEXT: vldrwt.u32 q0, [r0, #4]
472 ; CHECK-LE-NEXT: adds r0, #4
473 ; CHECK-LE-NEXT: vstrw.32 q0, [r1]
474 ; CHECK-LE-NEXT: bx lr
476 ; CHECK-BE-LABEL: masked_v4i32_preinc:
477 ; CHECK-BE: @ %bb.0: @ %entry
478 ; CHECK-BE-NEXT: vrev64.32 q1, q0
479 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
480 ; CHECK-BE-NEXT: vldrwt.u32 q0, [r0, #4]
481 ; CHECK-BE-NEXT: adds r0, #4
482 ; CHECK-BE-NEXT: vstrw.32 q0, [r1]
483 ; CHECK-BE-NEXT: bx lr
484 entry:
485 %z = getelementptr inbounds i8, i8* %x, i32 4
486 %0 = bitcast i8* %z to <4 x i32>*
487 %c = icmp sgt <4 x i32> %a, zeroinitializer
488 %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
489 %2 = bitcast i8* %y to <4 x i32>*
490 store <4 x i32> %1, <4 x i32>* %2, align 4
491 ret i8* %z
492 }
494 define arm_aapcs_vfpcc i8* @masked_v4i32_postinc(i8* %x, i8* %y, <4 x i32> %a) {
495 ; CHECK-LE-LABEL: masked_v4i32_postinc:
496 ; CHECK-LE: @ %bb.0: @ %entry
497 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
498 ; CHECK-LE-NEXT: vldrwt.u32 q0, [r0]
499 ; CHECK-LE-NEXT: adds r0, #4
500 ; CHECK-LE-NEXT: vstrw.32 q0, [r1]
501 ; CHECK-LE-NEXT: bx lr
503 ; CHECK-BE-LABEL: masked_v4i32_postinc:
504 ; CHECK-BE: @ %bb.0: @ %entry
505 ; CHECK-BE-NEXT: vrev64.32 q1, q0
506 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
507 ; CHECK-BE-NEXT: vldrwt.u32 q0, [r0]
508 ; CHECK-BE-NEXT: adds r0, #4
509 ; CHECK-BE-NEXT: vstrw.32 q0, [r1]
510 ; CHECK-BE-NEXT: bx lr
511 entry:
512 %z = getelementptr inbounds i8, i8* %x, i32 4
513 %0 = bitcast i8* %x to <4 x i32>*
514 %c = icmp sgt <4 x i32> %a, zeroinitializer
515 %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
516 %2 = bitcast i8* %y to <4 x i32>*
517 store <4 x i32> %1, <4 x i32>* %2, align 4
518 ret i8* %z
519 }
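; v8i16 masked loads and extending masked loads from v8i8 and v4i8, plus the
; pre/post-increment addressing forms.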
521 define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align4_zero(<8 x i16> *%dest, <8 x i16> %a) {
522 ; CHECK-LE-LABEL: masked_v8i16_align4_zero:
523 ; CHECK-LE: @ %bb.0: @ %entry
524 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
525 ; CHECK-LE-NEXT: vldrht.u16 q0, [r0]
526 ; CHECK-LE-NEXT: bx lr
528 ; CHECK-BE-LABEL: masked_v8i16_align4_zero:
529 ; CHECK-BE: @ %bb.0: @ %entry
530 ; CHECK-BE-NEXT: vrev64.16 q1, q0
531 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
532 ; CHECK-BE-NEXT: vldrht.u16 q1, [r0]
533 ; CHECK-BE-NEXT: vrev64.16 q0, q1
534 ; CHECK-BE-NEXT: bx lr
535 entry:
536 %c = icmp sgt <8 x i16> %a, zeroinitializer
537 %l = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %dest, i32 2, <8 x i1> %c, <8 x i16> zeroinitializer)
538 ret <8 x i16> %l
539 }
541 define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align2_undef(<8 x i16> *%dest, <8 x i16> %a) {
542 ; CHECK-LE-LABEL: masked_v8i16_align2_undef:
543 ; CHECK-LE: @ %bb.0: @ %entry
544 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
545 ; CHECK-LE-NEXT: vldrht.u16 q0, [r0]
546 ; CHECK-LE-NEXT: bx lr
548 ; CHECK-BE-LABEL: masked_v8i16_align2_undef:
549 ; CHECK-BE: @ %bb.0: @ %entry
550 ; CHECK-BE-NEXT: vrev64.16 q1, q0
551 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
552 ; CHECK-BE-NEXT: vldrht.u16 q1, [r0]
553 ; CHECK-BE-NEXT: vrev64.16 q0, q1
554 ; CHECK-BE-NEXT: bx lr
555 entry:
556 %c = icmp sgt <8 x i16> %a, zeroinitializer
557 %l = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %dest, i32 2, <8 x i1> %c, <8 x i16> undef)
558 ret <8 x i16> %l
559 }
561 define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align1_undef(<8 x i16> *%dest, <8 x i16> %a) {
562 ; CHECK-LE-LABEL: masked_v8i16_align1_undef:
563 ; CHECK-LE: @ %bb.0: @ %entry
564 ; CHECK-LE-NEXT: .pad #8
565 ; CHECK-LE-NEXT: sub sp, #8
566 ; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr
567 ; CHECK-LE-NEXT: @ implicit-def: $q0
568 ; CHECK-LE-NEXT: vmrs r1, p0
569 ; CHECK-LE-NEXT: and r2, r1, #1
570 ; CHECK-LE-NEXT: rsbs r3, r2, #0
571 ; CHECK-LE-NEXT: movs r2, #0
572 ; CHECK-LE-NEXT: bfi r2, r3, #0, #1
573 ; CHECK-LE-NEXT: ubfx r3, r1, #2, #1
574 ; CHECK-LE-NEXT: rsbs r3, r3, #0
575 ; CHECK-LE-NEXT: bfi r2, r3, #1, #1
576 ; CHECK-LE-NEXT: ubfx r3, r1, #4, #1
577 ; CHECK-LE-NEXT: rsbs r3, r3, #0
578 ; CHECK-LE-NEXT: bfi r2, r3, #2, #1
579 ; CHECK-LE-NEXT: ubfx r3, r1, #6, #1
580 ; CHECK-LE-NEXT: rsbs r3, r3, #0
581 ; CHECK-LE-NEXT: bfi r2, r3, #3, #1
582 ; CHECK-LE-NEXT: ubfx r3, r1, #8, #1
583 ; CHECK-LE-NEXT: rsbs r3, r3, #0
584 ; CHECK-LE-NEXT: bfi r2, r3, #4, #1
585 ; CHECK-LE-NEXT: ubfx r3, r1, #10, #1
586 ; CHECK-LE-NEXT: rsbs r3, r3, #0
587 ; CHECK-LE-NEXT: bfi r2, r3, #5, #1
588 ; CHECK-LE-NEXT: ubfx r3, r1, #12, #1
589 ; CHECK-LE-NEXT: ubfx r1, r1, #14, #1
590 ; CHECK-LE-NEXT: rsbs r3, r3, #0
591 ; CHECK-LE-NEXT: bfi r2, r3, #6, #1
592 ; CHECK-LE-NEXT: rsbs r1, r1, #0
593 ; CHECK-LE-NEXT: bfi r2, r1, #7, #1
594 ; CHECK-LE-NEXT: uxtb r1, r2
595 ; CHECK-LE-NEXT: lsls r2, r2, #31
596 ; CHECK-LE-NEXT: itt ne
597 ; CHECK-LE-NEXT: ldrhne r2, [r0]
598 ; CHECK-LE-NEXT: vmovne.16 q0[0], r2
599 ; CHECK-LE-NEXT: lsls r2, r1, #30
600 ; CHECK-LE-NEXT: itt mi
601 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #2]
602 ; CHECK-LE-NEXT: vmovmi.16 q0[1], r2
603 ; CHECK-LE-NEXT: lsls r2, r1, #29
604 ; CHECK-LE-NEXT: itt mi
605 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #4]
606 ; CHECK-LE-NEXT: vmovmi.16 q0[2], r2
607 ; CHECK-LE-NEXT: lsls r2, r1, #28
608 ; CHECK-LE-NEXT: itt mi
609 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #6]
610 ; CHECK-LE-NEXT: vmovmi.16 q0[3], r2
611 ; CHECK-LE-NEXT: lsls r2, r1, #27
612 ; CHECK-LE-NEXT: itt mi
613 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #8]
614 ; CHECK-LE-NEXT: vmovmi.16 q0[4], r2
615 ; CHECK-LE-NEXT: lsls r2, r1, #26
616 ; CHECK-LE-NEXT: itt mi
617 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #10]
618 ; CHECK-LE-NEXT: vmovmi.16 q0[5], r2
619 ; CHECK-LE-NEXT: lsls r2, r1, #25
620 ; CHECK-LE-NEXT: itt mi
621 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #12]
622 ; CHECK-LE-NEXT: vmovmi.16 q0[6], r2
623 ; CHECK-LE-NEXT: lsls r1, r1, #24
624 ; CHECK-LE-NEXT: itt mi
625 ; CHECK-LE-NEXT: ldrhmi r0, [r0, #14]
626 ; CHECK-LE-NEXT: vmovmi.16 q0[7], r0
627 ; CHECK-LE-NEXT: add sp, #8
628 ; CHECK-LE-NEXT: bx lr
630 ; CHECK-BE-LABEL: masked_v8i16_align1_undef:
631 ; CHECK-BE: @ %bb.0: @ %entry
632 ; CHECK-BE-NEXT: .pad #8
633 ; CHECK-BE-NEXT: sub sp, #8
634 ; CHECK-BE-NEXT: vrev64.16 q1, q0
635 ; CHECK-BE-NEXT: vcmp.s16 gt, q1, zr
636 ; CHECK-BE-NEXT: @ implicit-def: $q1
637 ; CHECK-BE-NEXT: vmrs r1, p0
638 ; CHECK-BE-NEXT: and r2, r1, #1
639 ; CHECK-BE-NEXT: rsbs r3, r2, #0
640 ; CHECK-BE-NEXT: movs r2, #0
641 ; CHECK-BE-NEXT: bfi r2, r3, #0, #1
642 ; CHECK-BE-NEXT: ubfx r3, r1, #2, #1
643 ; CHECK-BE-NEXT: rsbs r3, r3, #0
644 ; CHECK-BE-NEXT: bfi r2, r3, #1, #1
645 ; CHECK-BE-NEXT: ubfx r3, r1, #4, #1
646 ; CHECK-BE-NEXT: rsbs r3, r3, #0
647 ; CHECK-BE-NEXT: bfi r2, r3, #2, #1
648 ; CHECK-BE-NEXT: ubfx r3, r1, #6, #1
649 ; CHECK-BE-NEXT: rsbs r3, r3, #0
650 ; CHECK-BE-NEXT: bfi r2, r3, #3, #1
651 ; CHECK-BE-NEXT: ubfx r3, r1, #8, #1
652 ; CHECK-BE-NEXT: rsbs r3, r3, #0
653 ; CHECK-BE-NEXT: bfi r2, r3, #4, #1
654 ; CHECK-BE-NEXT: ubfx r3, r1, #10, #1
655 ; CHECK-BE-NEXT: rsbs r3, r3, #0
656 ; CHECK-BE-NEXT: bfi r2, r3, #5, #1
657 ; CHECK-BE-NEXT: ubfx r3, r1, #12, #1
658 ; CHECK-BE-NEXT: ubfx r1, r1, #14, #1
659 ; CHECK-BE-NEXT: rsbs r3, r3, #0
660 ; CHECK-BE-NEXT: bfi r2, r3, #6, #1
661 ; CHECK-BE-NEXT: rsbs r1, r1, #0
662 ; CHECK-BE-NEXT: bfi r2, r1, #7, #1
663 ; CHECK-BE-NEXT: uxtb r1, r2
664 ; CHECK-BE-NEXT: lsls r2, r2, #31
665 ; CHECK-BE-NEXT: itt ne
666 ; CHECK-BE-NEXT: ldrhne r2, [r0]
667 ; CHECK-BE-NEXT: vmovne.16 q1[0], r2
668 ; CHECK-BE-NEXT: lsls r2, r1, #30
669 ; CHECK-BE-NEXT: itt mi
670 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #2]
671 ; CHECK-BE-NEXT: vmovmi.16 q1[1], r2
672 ; CHECK-BE-NEXT: lsls r2, r1, #29
673 ; CHECK-BE-NEXT: itt mi
674 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #4]
675 ; CHECK-BE-NEXT: vmovmi.16 q1[2], r2
676 ; CHECK-BE-NEXT: lsls r2, r1, #28
677 ; CHECK-BE-NEXT: itt mi
678 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #6]
679 ; CHECK-BE-NEXT: vmovmi.16 q1[3], r2
680 ; CHECK-BE-NEXT: lsls r2, r1, #27
681 ; CHECK-BE-NEXT: itt mi
682 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #8]
683 ; CHECK-BE-NEXT: vmovmi.16 q1[4], r2
684 ; CHECK-BE-NEXT: lsls r2, r1, #26
685 ; CHECK-BE-NEXT: itt mi
686 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #10]
687 ; CHECK-BE-NEXT: vmovmi.16 q1[5], r2
688 ; CHECK-BE-NEXT: lsls r2, r1, #25
689 ; CHECK-BE-NEXT: itt mi
690 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #12]
691 ; CHECK-BE-NEXT: vmovmi.16 q1[6], r2
692 ; CHECK-BE-NEXT: lsls r1, r1, #24
693 ; CHECK-BE-NEXT: itt mi
694 ; CHECK-BE-NEXT: ldrhmi r0, [r0, #14]
695 ; CHECK-BE-NEXT: vmovmi.16 q1[7], r0
696 ; CHECK-BE-NEXT: vrev64.16 q0, q1
697 ; CHECK-BE-NEXT: add sp, #8
698 ; CHECK-BE-NEXT: bx lr
699 entry:
700 %c = icmp sgt <8 x i16> %a, zeroinitializer
701 %l = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %dest, i32 1, <8 x i1> %c, <8 x i16> undef)
702 ret <8 x i16> %l
703 }
705 define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align4_other(<8 x i16> *%dest, <8 x i16> %a) {
706 ; CHECK-LE-LABEL: masked_v8i16_align4_other:
707 ; CHECK-LE: @ %bb.0: @ %entry
708 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
709 ; CHECK-LE-NEXT: vldrht.u16 q1, [r0]
710 ; CHECK-LE-NEXT: vpsel q0, q1, q0
711 ; CHECK-LE-NEXT: bx lr
713 ; CHECK-BE-LABEL: masked_v8i16_align4_other:
714 ; CHECK-BE: @ %bb.0: @ %entry
715 ; CHECK-BE-NEXT: vrev64.16 q1, q0
716 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
717 ; CHECK-BE-NEXT: vldrht.u16 q0, [r0]
718 ; CHECK-BE-NEXT: vpsel q1, q0, q1
719 ; CHECK-BE-NEXT: vrev64.16 q0, q1
720 ; CHECK-BE-NEXT: bx lr
721 entry:
722 %c = icmp sgt <8 x i16> %a, zeroinitializer
723 %l = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %dest, i32 2, <8 x i1> %c, <8 x i16> %a)
724 ret <8 x i16> %l
725 }
727 define arm_aapcs_vfpcc <8 x i16> @sext8_masked_v8i16_align1_zero(<8 x i8> *%dest, <8 x i8> %a) {
728 ; CHECK-LE-LABEL: sext8_masked_v8i16_align1_zero:
729 ; CHECK-LE: @ %bb.0: @ %entry
730 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
731 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
732 ; CHECK-LE-NEXT: vldrbt.s16 q0, [r0]
733 ; CHECK-LE-NEXT: bx lr
735 ; CHECK-BE-LABEL: sext8_masked_v8i16_align1_zero:
736 ; CHECK-BE: @ %bb.0: @ %entry
737 ; CHECK-BE-NEXT: vrev64.16 q1, q0
738 ; CHECK-BE-NEXT: vmovlb.s8 q0, q1
739 ; CHECK-BE-NEXT: vpt.s16 gt, q0, zr
740 ; CHECK-BE-NEXT: vldrbt.s16 q1, [r0]
741 ; CHECK-BE-NEXT: vrev64.16 q0, q1
742 ; CHECK-BE-NEXT: bx lr
743 entry:
744 %c = icmp sgt <8 x i8> %a, zeroinitializer
745 %l = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %dest, i32 1, <8 x i1> %c, <8 x i8> zeroinitializer)
746 %ext = sext <8 x i8> %l to <8 x i16>
747 ret <8 x i16> %ext
748 }
750 define arm_aapcs_vfpcc <8 x i16> @sext8_masked_v8i16_align1_undef(<8 x i8> *%dest, <8 x i8> %a) {
751 ; CHECK-LE-LABEL: sext8_masked_v8i16_align1_undef:
752 ; CHECK-LE: @ %bb.0: @ %entry
753 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
754 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
755 ; CHECK-LE-NEXT: vldrbt.s16 q0, [r0]
756 ; CHECK-LE-NEXT: bx lr
758 ; CHECK-BE-LABEL: sext8_masked_v8i16_align1_undef:
759 ; CHECK-BE: @ %bb.0: @ %entry
760 ; CHECK-BE-NEXT: vrev64.16 q1, q0
761 ; CHECK-BE-NEXT: vmovlb.s8 q0, q1
762 ; CHECK-BE-NEXT: vpt.s16 gt, q0, zr
763 ; CHECK-BE-NEXT: vldrbt.s16 q1, [r0]
764 ; CHECK-BE-NEXT: vrev64.16 q0, q1
765 ; CHECK-BE-NEXT: bx lr
766 entry:
767 %c = icmp sgt <8 x i8> %a, zeroinitializer
768 %l = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %dest, i32 1, <8 x i1> %c, <8 x i8> undef)
769 %ext = sext <8 x i8> %l to <8 x i16>
770 ret <8 x i16> %ext
771 }
773 define arm_aapcs_vfpcc <8 x i16> @sext8_masked_v8i16_align1_other(<8 x i8> *%dest, <8 x i8> %a) {
774 ; CHECK-LE-LABEL: sext8_masked_v8i16_align1_other:
775 ; CHECK-LE: @ %bb.0: @ %entry
776 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
777 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
778 ; CHECK-LE-NEXT: vldrbt.s16 q1, [r0]
779 ; CHECK-LE-NEXT: vpsel q0, q1, q0
780 ; CHECK-LE-NEXT: bx lr
782 ; CHECK-BE-LABEL: sext8_masked_v8i16_align1_other:
783 ; CHECK-BE: @ %bb.0: @ %entry
784 ; CHECK-BE-NEXT: vrev64.16 q1, q0
785 ; CHECK-BE-NEXT: vmovlb.s8 q0, q1
786 ; CHECK-BE-NEXT: vpt.s16 gt, q0, zr
787 ; CHECK-BE-NEXT: vldrbt.s16 q1, [r0]
788 ; CHECK-BE-NEXT: vpsel q1, q1, q0
789 ; CHECK-BE-NEXT: vrev64.16 q0, q1
790 ; CHECK-BE-NEXT: bx lr
791 entry:
792 %c = icmp sgt <8 x i8> %a, zeroinitializer
793 %l = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %dest, i32 1, <8 x i1> %c, <8 x i8> %a)
794 %ext = sext <8 x i8> %l to <8 x i16>
795 ret <8 x i16> %ext
796 }
798 define arm_aapcs_vfpcc <4 x i32> @sext8_masked_v4i32_align1_zero(<4 x i8> *%dest, <4 x i8> %a) {
799 ; CHECK-LE-LABEL: sext8_masked_v4i32_align1_zero:
800 ; CHECK-LE: @ %bb.0: @ %entry
801 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
802 ; CHECK-LE-NEXT: vmovlb.s16 q0, q0
803 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
804 ; CHECK-LE-NEXT: vldrbt.s32 q0, [r0]
805 ; CHECK-LE-NEXT: bx lr
807 ; CHECK-BE-LABEL: sext8_masked_v4i32_align1_zero:
808 ; CHECK-BE: @ %bb.0: @ %entry
809 ; CHECK-BE-NEXT: vrev64.32 q1, q0
810 ; CHECK-BE-NEXT: vmovlb.s8 q0, q1
811 ; CHECK-BE-NEXT: vmovlb.s16 q0, q0
812 ; CHECK-BE-NEXT: vpt.s32 gt, q0, zr
813 ; CHECK-BE-NEXT: vldrbt.s32 q1, [r0]
814 ; CHECK-BE-NEXT: vrev64.32 q0, q1
815 ; CHECK-BE-NEXT: bx lr
816 entry:
817 %c = icmp sgt <4 x i8> %a, zeroinitializer
818 %l = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %dest, i32 1, <4 x i1> %c, <4 x i8> zeroinitializer)
819 %ext = sext <4 x i8> %l to <4 x i32>
820 ret <4 x i32> %ext
821 }
823 define arm_aapcs_vfpcc <4 x i32> @sext8_masked_v4i32_align1_undef(<4 x i8> *%dest, <4 x i8> %a) {
824 ; CHECK-LE-LABEL: sext8_masked_v4i32_align1_undef:
825 ; CHECK-LE: @ %bb.0: @ %entry
826 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
827 ; CHECK-LE-NEXT: vmovlb.s16 q0, q0
828 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
829 ; CHECK-LE-NEXT: vldrbt.s32 q0, [r0]
830 ; CHECK-LE-NEXT: bx lr
832 ; CHECK-BE-LABEL: sext8_masked_v4i32_align1_undef:
833 ; CHECK-BE: @ %bb.0: @ %entry
834 ; CHECK-BE-NEXT: vrev64.32 q1, q0
835 ; CHECK-BE-NEXT: vmovlb.s8 q0, q1
836 ; CHECK-BE-NEXT: vmovlb.s16 q0, q0
837 ; CHECK-BE-NEXT: vpt.s32 gt, q0, zr
838 ; CHECK-BE-NEXT: vldrbt.s32 q1, [r0]
839 ; CHECK-BE-NEXT: vrev64.32 q0, q1
840 ; CHECK-BE-NEXT: bx lr
841 entry:
842 %c = icmp sgt <4 x i8> %a, zeroinitializer
843 %l = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %dest, i32 1, <4 x i1> %c, <4 x i8> undef)
844 %ext = sext <4 x i8> %l to <4 x i32>
845 ret <4 x i32> %ext
846 }
848 define arm_aapcs_vfpcc <4 x i32> @sext8_masked_v4i32_align1_other(<4 x i8> *%dest, <4 x i8> %a) {
849 ; CHECK-LE-LABEL: sext8_masked_v4i32_align1_other:
850 ; CHECK-LE: @ %bb.0: @ %entry
851 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
852 ; CHECK-LE-NEXT: vmovlb.s16 q0, q0
853 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
854 ; CHECK-LE-NEXT: vldrbt.s32 q1, [r0]
855 ; CHECK-LE-NEXT: vpsel q0, q1, q0
856 ; CHECK-LE-NEXT: bx lr
858 ; CHECK-BE-LABEL: sext8_masked_v4i32_align1_other:
859 ; CHECK-BE: @ %bb.0: @ %entry
860 ; CHECK-BE-NEXT: vrev64.32 q1, q0
861 ; CHECK-BE-NEXT: vmovlb.s8 q0, q1
862 ; CHECK-BE-NEXT: vmovlb.s16 q0, q0
863 ; CHECK-BE-NEXT: vpt.s32 gt, q0, zr
864 ; CHECK-BE-NEXT: vldrbt.s32 q1, [r0]
865 ; CHECK-BE-NEXT: vpsel q1, q1, q0
866 ; CHECK-BE-NEXT: vrev64.32 q0, q1
867 ; CHECK-BE-NEXT: bx lr
868 entry:
869 %c = icmp sgt <4 x i8> %a, zeroinitializer
870 %l = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %dest, i32 1, <4 x i1> %c, <4 x i8> %a)
871 %ext = sext <4 x i8> %l to <4 x i32>
872 ret <4 x i32> %ext
873 }
875 define arm_aapcs_vfpcc <4 x i32> @zext8_masked_v4i32_align1_zero(<4 x i8> *%dest, <4 x i8> %a) {
876 ; CHECK-LE-LABEL: zext8_masked_v4i32_align1_zero:
877 ; CHECK-LE: @ %bb.0: @ %entry
878 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
879 ; CHECK-LE-NEXT: vmovlb.s16 q0, q0
880 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
881 ; CHECK-LE-NEXT: vldrbt.u32 q0, [r0]
882 ; CHECK-LE-NEXT: bx lr
884 ; CHECK-BE-LABEL: zext8_masked_v4i32_align1_zero:
885 ; CHECK-BE: @ %bb.0: @ %entry
886 ; CHECK-BE-NEXT: vrev64.32 q1, q0
887 ; CHECK-BE-NEXT: vmovlb.s8 q0, q1
888 ; CHECK-BE-NEXT: vmovlb.s16 q0, q0
889 ; CHECK-BE-NEXT: vpt.s32 gt, q0, zr
890 ; CHECK-BE-NEXT: vldrbt.u32 q1, [r0]
891 ; CHECK-BE-NEXT: vrev64.32 q0, q1
892 ; CHECK-BE-NEXT: bx lr
893 entry:
894 %c = icmp sgt <4 x i8> %a, zeroinitializer
895 %l = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %dest, i32 1, <4 x i1> %c, <4 x i8> zeroinitializer)
896 %ext = zext <4 x i8> %l to <4 x i32>
897 ret <4 x i32> %ext
898 }
900 define arm_aapcs_vfpcc <4 x i32> @zext8_masked_v4i32_align1_undef(<4 x i8> *%dest, <4 x i8> %a) {
901 ; CHECK-LE-LABEL: zext8_masked_v4i32_align1_undef:
902 ; CHECK-LE: @ %bb.0: @ %entry
903 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
904 ; CHECK-LE-NEXT: vmovlb.s16 q0, q0
905 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
906 ; CHECK-LE-NEXT: vldrbt.u32 q0, [r0]
907 ; CHECK-LE-NEXT: bx lr
909 ; CHECK-BE-LABEL: zext8_masked_v4i32_align1_undef:
910 ; CHECK-BE: @ %bb.0: @ %entry
911 ; CHECK-BE-NEXT: vrev64.32 q1, q0
912 ; CHECK-BE-NEXT: vmovlb.s8 q0, q1
913 ; CHECK-BE-NEXT: vmovlb.s16 q0, q0
914 ; CHECK-BE-NEXT: vpt.s32 gt, q0, zr
915 ; CHECK-BE-NEXT: vldrbt.u32 q1, [r0]
916 ; CHECK-BE-NEXT: vrev64.32 q0, q1
917 ; CHECK-BE-NEXT: bx lr
918 entry:
919 %c = icmp sgt <4 x i8> %a, zeroinitializer
920 %l = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %dest, i32 1, <4 x i1> %c, <4 x i8> undef)
921 %ext = zext <4 x i8> %l to <4 x i32>
922 ret <4 x i32> %ext
923 }
925 define arm_aapcs_vfpcc <4 x i32> @zext8_masked_v4i32_align1_other(<4 x i8> *%dest, <4 x i8> %a) {
926 ; CHECK-LE-LABEL: zext8_masked_v4i32_align1_other:
927 ; CHECK-LE: @ %bb.0: @ %entry
928 ; CHECK-LE-NEXT: vmov.i32 q1, #0xff
929 ; CHECK-LE-NEXT: vand q1, q0, q1
930 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
931 ; CHECK-LE-NEXT: vmovlb.s16 q0, q0
932 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
933 ; CHECK-LE-NEXT: vldrbt.u32 q0, [r0]
934 ; CHECK-LE-NEXT: vpsel q0, q0, q1
935 ; CHECK-LE-NEXT: bx lr
937 ; CHECK-BE-LABEL: zext8_masked_v4i32_align1_other:
938 ; CHECK-BE: @ %bb.0: @ %entry
939 ; CHECK-BE-NEXT: vmov.i32 q1, #0xff
940 ; CHECK-BE-NEXT: vrev64.32 q2, q0
941 ; CHECK-BE-NEXT: vand q0, q2, q1
942 ; CHECK-BE-NEXT: vmovlb.s8 q1, q2
943 ; CHECK-BE-NEXT: vmovlb.s16 q1, q1
944 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
945 ; CHECK-BE-NEXT: vldrbt.u32 q1, [r0]
946 ; CHECK-BE-NEXT: vpsel q1, q1, q0
947 ; CHECK-BE-NEXT: vrev64.32 q0, q1
948 ; CHECK-BE-NEXT: bx lr
949 entry:
950 %c = icmp sgt <4 x i8> %a, zeroinitializer
951 %l = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %dest, i32 1, <4 x i1> %c, <4 x i8> %a)
952 %ext = zext <4 x i8> %l to <4 x i32>
953 ret <4 x i32> %ext
954 }
956 define arm_aapcs_vfpcc <8 x i16> @zext8_masked_v8i16_align1_zero(<8 x i8> *%dest, <8 x i8> %a) {
957 ; CHECK-LE-LABEL: zext8_masked_v8i16_align1_zero:
958 ; CHECK-LE: @ %bb.0: @ %entry
959 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
960 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
961 ; CHECK-LE-NEXT: vldrbt.u16 q0, [r0]
962 ; CHECK-LE-NEXT: bx lr
964 ; CHECK-BE-LABEL: zext8_masked_v8i16_align1_zero:
965 ; CHECK-BE: @ %bb.0: @ %entry
966 ; CHECK-BE-NEXT: vrev64.16 q1, q0
967 ; CHECK-BE-NEXT: vmovlb.s8 q0, q1
968 ; CHECK-BE-NEXT: vpt.s16 gt, q0, zr
969 ; CHECK-BE-NEXT: vldrbt.u16 q1, [r0]
970 ; CHECK-BE-NEXT: vrev64.16 q0, q1
971 ; CHECK-BE-NEXT: bx lr
972 entry:
973 %c = icmp sgt <8 x i8> %a, zeroinitializer
974 %l = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %dest, i32 1, <8 x i1> %c, <8 x i8> zeroinitializer)
975 %ext = zext <8 x i8> %l to <8 x i16>
976 ret <8 x i16> %ext
977 }
979 define arm_aapcs_vfpcc <8 x i16> @zext8_masked_v8i16_align1_undef(<8 x i8> *%dest, <8 x i8> %a) {
980 ; CHECK-LE-LABEL: zext8_masked_v8i16_align1_undef:
981 ; CHECK-LE: @ %bb.0: @ %entry
982 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
983 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
984 ; CHECK-LE-NEXT: vldrbt.u16 q0, [r0]
985 ; CHECK-LE-NEXT: bx lr
987 ; CHECK-BE-LABEL: zext8_masked_v8i16_align1_undef:
988 ; CHECK-BE: @ %bb.0: @ %entry
989 ; CHECK-BE-NEXT: vrev64.16 q1, q0
990 ; CHECK-BE-NEXT: vmovlb.s8 q0, q1
991 ; CHECK-BE-NEXT: vpt.s16 gt, q0, zr
992 ; CHECK-BE-NEXT: vldrbt.u16 q1, [r0]
993 ; CHECK-BE-NEXT: vrev64.16 q0, q1
994 ; CHECK-BE-NEXT: bx lr
995 entry:
996 %c = icmp sgt <8 x i8> %a, zeroinitializer
997 %l = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %dest, i32 1, <8 x i1> %c, <8 x i8> undef)
998 %ext = zext <8 x i8> %l to <8 x i16>
999 ret <8 x i16> %ext
1000 }
1002 define arm_aapcs_vfpcc <8 x i16> @zext8_masked_v8i16_align1_other(<8 x i8> *%dest, <8 x i8> %a) {
1003 ; CHECK-LE-LABEL: zext8_masked_v8i16_align1_other:
1004 ; CHECK-LE: @ %bb.0: @ %entry
1005 ; CHECK-LE-NEXT: vmovlb.u8 q1, q0
1006 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
1007 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
1008 ; CHECK-LE-NEXT: vldrbt.u16 q0, [r0]
1009 ; CHECK-LE-NEXT: vpsel q0, q0, q1
1010 ; CHECK-LE-NEXT: bx lr
1012 ; CHECK-BE-LABEL: zext8_masked_v8i16_align1_other:
1013 ; CHECK-BE: @ %bb.0: @ %entry
1014 ; CHECK-BE-NEXT: vrev64.16 q1, q0
1015 ; CHECK-BE-NEXT: vmovlb.u8 q0, q1
1016 ; CHECK-BE-NEXT: vmovlb.s8 q1, q1
1017 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
1018 ; CHECK-BE-NEXT: vldrbt.u16 q1, [r0]
1019 ; CHECK-BE-NEXT: vpsel q1, q1, q0
1020 ; CHECK-BE-NEXT: vrev64.16 q0, q1
1021 ; CHECK-BE-NEXT: bx lr
1022 entry:
1023 %c = icmp sgt <8 x i8> %a, zeroinitializer
1024 %l = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %dest, i32 1, <8 x i1> %c, <8 x i8> %a)
1025 %ext = zext <8 x i8> %l to <8 x i16>
1026 ret <8 x i16> %ext
1027 }
1029 define i8* @masked_v8i16_preinc(i8* %x, i8* %y, <8 x i16> %a) {
1030 ; CHECK-LE-LABEL: masked_v8i16_preinc:
1031 ; CHECK-LE: @ %bb.0: @ %entry
1032 ; CHECK-LE-NEXT: vldr d1, [sp]
1033 ; CHECK-LE-NEXT: vmov d0, r2, r3
1034 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
1035 ; CHECK-LE-NEXT: vldrht.u16 q0, [r0, #4]
1036 ; CHECK-LE-NEXT: adds r0, #4
1037 ; CHECK-LE-NEXT: vstrw.32 q0, [r1]
1038 ; CHECK-LE-NEXT: bx lr
1040 ; CHECK-BE-LABEL: masked_v8i16_preinc:
1041 ; CHECK-BE: @ %bb.0: @ %entry
1042 ; CHECK-BE-NEXT: vldr d1, [sp]
1043 ; CHECK-BE-NEXT: vmov d0, r3, r2
1044 ; CHECK-BE-NEXT: vrev64.16 q1, q0
1045 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
1046 ; CHECK-BE-NEXT: vldrht.u16 q0, [r0, #4]
1047 ; CHECK-BE-NEXT: adds r0, #4
1048 ; CHECK-BE-NEXT: vstrh.16 q0, [r1]
1049 ; CHECK-BE-NEXT: bx lr
1050 entry:
1051 %z = getelementptr inbounds i8, i8* %x, i32 4
1052 %0 = bitcast i8* %z to <8 x i16>*
1053 %c = icmp sgt <8 x i16> %a, zeroinitializer
1054 %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 4, <8 x i1> %c, <8 x i16> undef)
1055 %2 = bitcast i8* %y to <8 x i16>*
1056 store <8 x i16> %1, <8 x i16>* %2, align 4
1057 ret i8* %z
1058 }
1060 define arm_aapcs_vfpcc i8* @masked_v8i16_postinc(i8* %x, i8* %y, <8 x i16> %a) {
1061 ; CHECK-LE-LABEL: masked_v8i16_postinc:
1062 ; CHECK-LE: @ %bb.0: @ %entry
1063 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
1064 ; CHECK-LE-NEXT: vldrht.u16 q0, [r0]
1065 ; CHECK-LE-NEXT: adds r0, #4
1066 ; CHECK-LE-NEXT: vstrw.32 q0, [r1]
1067 ; CHECK-LE-NEXT: bx lr
1069 ; CHECK-BE-LABEL: masked_v8i16_postinc:
1070 ; CHECK-BE: @ %bb.0: @ %entry
1071 ; CHECK-BE-NEXT: vrev64.16 q1, q0
1072 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
1073 ; CHECK-BE-NEXT: vldrht.u16 q0, [r0]
1074 ; CHECK-BE-NEXT: adds r0, #4
1075 ; CHECK-BE-NEXT: vstrh.16 q0, [r1]
1076 ; CHECK-BE-NEXT: bx lr
1077 entry:
1078 %z = getelementptr inbounds i8, i8* %x, i32 4
1079 %0 = bitcast i8* %x to <8 x i16>*
1080 %c = icmp sgt <8 x i16> %a, zeroinitializer
1081 %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 4, <8 x i1> %c, <8 x i16> undef)
1082 %2 = bitcast i8* %y to <8 x i16>*
1083 store <8 x i16> %1, <8 x i16>* %2, align 4
1084 ret i8* %z
1085 }
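; v16i8 masked loads: byte loads have no alignment requirement, so these always use a
; predicated vldrb, including the pre/post-increment forms.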
1088 define arm_aapcs_vfpcc <16 x i8> @masked_v16i8_align4_zero(<16 x i8> *%dest, <16 x i8> %a) {
1089 ; CHECK-LE-LABEL: masked_v16i8_align4_zero:
1090 ; CHECK-LE: @ %bb.0: @ %entry
1091 ; CHECK-LE-NEXT: vpt.s8 gt, q0, zr
1092 ; CHECK-LE-NEXT: vldrbt.u8 q0, [r0]
1093 ; CHECK-LE-NEXT: bx lr
1095 ; CHECK-BE-LABEL: masked_v16i8_align4_zero:
1096 ; CHECK-BE: @ %bb.0: @ %entry
1097 ; CHECK-BE-NEXT: vrev64.8 q1, q0
1098 ; CHECK-BE-NEXT: vpt.s8 gt, q1, zr
1099 ; CHECK-BE-NEXT: vldrbt.u8 q1, [r0]
1100 ; CHECK-BE-NEXT: vrev64.8 q0, q1
1101 ; CHECK-BE-NEXT: bx lr
1102 entry:
1103 %c = icmp sgt <16 x i8> %a, zeroinitializer
1104 %l = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %dest, i32 1, <16 x i1> %c, <16 x i8> zeroinitializer)
1105 ret <16 x i8> %l
1106 }
1108 define arm_aapcs_vfpcc <16 x i8> @masked_v16i8_align4_undef(<16 x i8> *%dest, <16 x i8> %a) {
1109 ; CHECK-LE-LABEL: masked_v16i8_align4_undef:
1110 ; CHECK-LE: @ %bb.0: @ %entry
1111 ; CHECK-LE-NEXT: vpt.s8 gt, q0, zr
1112 ; CHECK-LE-NEXT: vldrbt.u8 q0, [r0]
1113 ; CHECK-LE-NEXT: bx lr
1115 ; CHECK-BE-LABEL: masked_v16i8_align4_undef:
1116 ; CHECK-BE: @ %bb.0: @ %entry
1117 ; CHECK-BE-NEXT: vrev64.8 q1, q0
1118 ; CHECK-BE-NEXT: vpt.s8 gt, q1, zr
1119 ; CHECK-BE-NEXT: vldrbt.u8 q1, [r0]
1120 ; CHECK-BE-NEXT: vrev64.8 q0, q1
1121 ; CHECK-BE-NEXT: bx lr
1122 entry:
1123 %c = icmp sgt <16 x i8> %a, zeroinitializer
1124 %l = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %dest, i32 1, <16 x i1> %c, <16 x i8> undef)
1125 ret <16 x i8> %l
1126 }
1128 define arm_aapcs_vfpcc <16 x i8> @masked_v16i8_align4_other(<16 x i8> *%dest, <16 x i8> %a) {
1129 ; CHECK-LE-LABEL: masked_v16i8_align4_other:
1130 ; CHECK-LE: @ %bb.0: @ %entry
1131 ; CHECK-LE-NEXT: vpt.s8 gt, q0, zr
1132 ; CHECK-LE-NEXT: vldrbt.u8 q1, [r0]
1133 ; CHECK-LE-NEXT: vpsel q0, q1, q0
1134 ; CHECK-LE-NEXT: bx lr
1136 ; CHECK-BE-LABEL: masked_v16i8_align4_other:
1137 ; CHECK-BE: @ %bb.0: @ %entry
1138 ; CHECK-BE-NEXT: vrev64.8 q1, q0
1139 ; CHECK-BE-NEXT: vpt.s8 gt, q1, zr
1140 ; CHECK-BE-NEXT: vldrbt.u8 q0, [r0]
1141 ; CHECK-BE-NEXT: vpsel q1, q0, q1
1142 ; CHECK-BE-NEXT: vrev64.8 q0, q1
1143 ; CHECK-BE-NEXT: bx lr
1144 entry:
1145 %c = icmp sgt <16 x i8> %a, zeroinitializer
1146 %l = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %dest, i32 1, <16 x i1> %c, <16 x i8> %a)
1147 ret <16 x i8> %l
1148 }
1150 define arm_aapcs_vfpcc i8* @masked_v16i8_preinc(i8* %x, i8* %y, <16 x i8> %a) {
1151 ; CHECK-LE-LABEL: masked_v16i8_preinc:
1152 ; CHECK-LE: @ %bb.0: @ %entry
1153 ; CHECK-LE-NEXT: vpt.s8 gt, q0, zr
1154 ; CHECK-LE-NEXT: vldrbt.u8 q0, [r0, #4]
1155 ; CHECK-LE-NEXT: adds r0, #4
1156 ; CHECK-LE-NEXT: vstrw.32 q0, [r1]
1157 ; CHECK-LE-NEXT: bx lr
1159 ; CHECK-BE-LABEL: masked_v16i8_preinc:
1160 ; CHECK-BE: @ %bb.0: @ %entry
1161 ; CHECK-BE-NEXT: vrev64.8 q1, q0
1162 ; CHECK-BE-NEXT: vpt.s8 gt, q1, zr
1163 ; CHECK-BE-NEXT: vldrbt.u8 q0, [r0, #4]
1164 ; CHECK-BE-NEXT: adds r0, #4
1165 ; CHECK-BE-NEXT: vstrb.8 q0, [r1]
1166 ; CHECK-BE-NEXT: bx lr
1167 entry:
1168 %z = getelementptr inbounds i8, i8* %x, i32 4
1169 %0 = bitcast i8* %z to <16 x i8>*
1170 %c = icmp sgt <16 x i8> %a, zeroinitializer
1171 %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 4, <16 x i1> %c, <16 x i8> undef)
1172 %2 = bitcast i8* %y to <16 x i8>*
1173 store <16 x i8> %1, <16 x i8>* %2, align 4
1174 ret i8* %z
1175 }
1177 define arm_aapcs_vfpcc i8* @masked_v16i8_postinc(i8* %x, i8* %y, <16 x i8> %a) {
1178 ; CHECK-LE-LABEL: masked_v16i8_postinc:
1179 ; CHECK-LE: @ %bb.0: @ %entry
1180 ; CHECK-LE-NEXT: vpt.s8 gt, q0, zr
1181 ; CHECK-LE-NEXT: vldrbt.u8 q0, [r0]
1182 ; CHECK-LE-NEXT: adds r0, #4
1183 ; CHECK-LE-NEXT: vstrw.32 q0, [r1]
1184 ; CHECK-LE-NEXT: bx lr
1186 ; CHECK-BE-LABEL: masked_v16i8_postinc:
1187 ; CHECK-BE: @ %bb.0: @ %entry
1188 ; CHECK-BE-NEXT: vrev64.8 q1, q0
1189 ; CHECK-BE-NEXT: vpt.s8 gt, q1, zr
1190 ; CHECK-BE-NEXT: vldrbt.u8 q0, [r0]
1191 ; CHECK-BE-NEXT: adds r0, #4
1192 ; CHECK-BE-NEXT: vstrb.8 q0, [r1]
1193 ; CHECK-BE-NEXT: bx lr
1194 entry:
1195 %z = getelementptr inbounds i8, i8* %x, i32 4
1196 %0 = bitcast i8* %x to <16 x i8>*
1197 %c = icmp sgt <16 x i8> %a, zeroinitializer
1198 %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 4, <16 x i1> %c, <16 x i8> undef)
1199 %2 = bitcast i8* %y to <16 x i8>*
1200 store <16 x i8> %1, <16 x i8>* %2, align 4
1201 ret i8* %z
1202 }
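; Floating-point masked loads: v4f32 lowers like v4i32, using predicated vldrw loads;
; only the under-aligned case falls back to per-lane conditional loads.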
1205 define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align4_zero(<4 x float> *%dest, <4 x i32> %a) {
1206 ; CHECK-LE-LABEL: masked_v4f32_align4_zero:
1207 ; CHECK-LE: @ %bb.0: @ %entry
1208 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
1209 ; CHECK-LE-NEXT: vldrwt.u32 q0, [r0]
1210 ; CHECK-LE-NEXT: bx lr
1212 ; CHECK-BE-LABEL: masked_v4f32_align4_zero:
1213 ; CHECK-BE: @ %bb.0: @ %entry
1214 ; CHECK-BE-NEXT: vrev64.32 q1, q0
1215 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
1216 ; CHECK-BE-NEXT: vldrwt.u32 q1, [r0]
1217 ; CHECK-BE-NEXT: vrev64.32 q0, q1
1218 ; CHECK-BE-NEXT: bx lr
1219 entry:
1220 %c = icmp sgt <4 x i32> %a, zeroinitializer
1221 %l = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %dest, i32 4, <4 x i1> %c, <4 x float> zeroinitializer)
1222 ret <4 x float> %l
1223 }
1225 define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align4_undef(<4 x float> *%dest, <4 x i32> %a) {
1226 ; CHECK-LE-LABEL: masked_v4f32_align4_undef:
1227 ; CHECK-LE: @ %bb.0: @ %entry
1228 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
1229 ; CHECK-LE-NEXT: vldrwt.u32 q0, [r0]
1230 ; CHECK-LE-NEXT: bx lr
1232 ; CHECK-BE-LABEL: masked_v4f32_align4_undef:
1233 ; CHECK-BE: @ %bb.0: @ %entry
1234 ; CHECK-BE-NEXT: vrev64.32 q1, q0
1235 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
1236 ; CHECK-BE-NEXT: vldrwt.u32 q1, [r0]
1237 ; CHECK-BE-NEXT: vrev64.32 q0, q1
1238 ; CHECK-BE-NEXT: bx lr
1239 entry:
1240 %c = icmp sgt <4 x i32> %a, zeroinitializer
1241 %l = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %dest, i32 4, <4 x i1> %c, <4 x float> undef)
1242 ret <4 x float> %l
1243 }
1245 define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align1_undef(<4 x float> *%dest, <4 x i32> %a) {
1246 ; CHECK-LE-LABEL: masked_v4f32_align1_undef:
1247 ; CHECK-LE: @ %bb.0: @ %entry
1248 ; CHECK-LE-NEXT: .pad #4
1249 ; CHECK-LE-NEXT: sub sp, #4
1250 ; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr
1251 ; CHECK-LE-NEXT: @ implicit-def: $q0
1252 ; CHECK-LE-NEXT: vmrs r2, p0
1253 ; CHECK-LE-NEXT: and r1, r2, #1
1254 ; CHECK-LE-NEXT: rsbs r3, r1, #0
1255 ; CHECK-LE-NEXT: movs r1, #0
1256 ; CHECK-LE-NEXT: bfi r1, r3, #0, #1
1257 ; CHECK-LE-NEXT: ubfx r3, r2, #4, #1
1258 ; CHECK-LE-NEXT: rsbs r3, r3, #0
1259 ; CHECK-LE-NEXT: bfi r1, r3, #1, #1
1260 ; CHECK-LE-NEXT: ubfx r3, r2, #8, #1
1261 ; CHECK-LE-NEXT: ubfx r2, r2, #12, #1
1262 ; CHECK-LE-NEXT: rsbs r3, r3, #0
1263 ; CHECK-LE-NEXT: bfi r1, r3, #2, #1
1264 ; CHECK-LE-NEXT: rsbs r2, r2, #0
1265 ; CHECK-LE-NEXT: bfi r1, r2, #3, #1
1266 ; CHECK-LE-NEXT: lsls r2, r1, #31
1267 ; CHECK-LE-NEXT: itt ne
1268 ; CHECK-LE-NEXT: ldrne r2, [r0]
1269 ; CHECK-LE-NEXT: vmovne s0, r2
1270 ; CHECK-LE-NEXT: lsls r2, r1, #30
1271 ; CHECK-LE-NEXT: itt mi
1272 ; CHECK-LE-NEXT: ldrmi r2, [r0, #4]
1273 ; CHECK-LE-NEXT: vmovmi s1, r2
1274 ; CHECK-LE-NEXT: lsls r2, r1, #29
1275 ; CHECK-LE-NEXT: itt mi
1276 ; CHECK-LE-NEXT: ldrmi r2, [r0, #8]
1277 ; CHECK-LE-NEXT: vmovmi s2, r2
1278 ; CHECK-LE-NEXT: lsls r1, r1, #28
1279 ; CHECK-LE-NEXT: itt mi
1280 ; CHECK-LE-NEXT: ldrmi r0, [r0, #12]
1281 ; CHECK-LE-NEXT: vmovmi s3, r0
1282 ; CHECK-LE-NEXT: add sp, #4
1283 ; CHECK-LE-NEXT: bx lr
1285 ; CHECK-BE-LABEL: masked_v4f32_align1_undef:
1286 ; CHECK-BE: @ %bb.0: @ %entry
1287 ; CHECK-BE-NEXT: .pad #4
1288 ; CHECK-BE-NEXT: sub sp, #4
1289 ; CHECK-BE-NEXT: vrev64.32 q1, q0
1290 ; CHECK-BE-NEXT: vcmp.s32 gt, q1, zr
1291 ; CHECK-BE-NEXT: @ implicit-def: $q1
1292 ; CHECK-BE-NEXT: vmrs r2, p0
1293 ; CHECK-BE-NEXT: and r1, r2, #1
1294 ; CHECK-BE-NEXT: rsbs r3, r1, #0
1295 ; CHECK-BE-NEXT: movs r1, #0
1296 ; CHECK-BE-NEXT: bfi r1, r3, #0, #1
1297 ; CHECK-BE-NEXT: ubfx r3, r2, #4, #1
1298 ; CHECK-BE-NEXT: rsbs r3, r3, #0
1299 ; CHECK-BE-NEXT: bfi r1, r3, #1, #1
1300 ; CHECK-BE-NEXT: ubfx r3, r2, #8, #1
1301 ; CHECK-BE-NEXT: ubfx r2, r2, #12, #1
1302 ; CHECK-BE-NEXT: rsbs r3, r3, #0
1303 ; CHECK-BE-NEXT: bfi r1, r3, #2, #1
1304 ; CHECK-BE-NEXT: rsbs r2, r2, #0
1305 ; CHECK-BE-NEXT: bfi r1, r2, #3, #1
1306 ; CHECK-BE-NEXT: lsls r2, r1, #31
1307 ; CHECK-BE-NEXT: itt ne
1308 ; CHECK-BE-NEXT: ldrne r2, [r0]
1309 ; CHECK-BE-NEXT: vmovne s4, r2
1310 ; CHECK-BE-NEXT: lsls r2, r1, #30
1311 ; CHECK-BE-NEXT: itt mi
1312 ; CHECK-BE-NEXT: ldrmi r2, [r0, #4]
1313 ; CHECK-BE-NEXT: vmovmi s5, r2
1314 ; CHECK-BE-NEXT: lsls r2, r1, #29
1315 ; CHECK-BE-NEXT: itt mi
1316 ; CHECK-BE-NEXT: ldrmi r2, [r0, #8]
1317 ; CHECK-BE-NEXT: vmovmi s6, r2
1318 ; CHECK-BE-NEXT: lsls r1, r1, #28
1319 ; CHECK-BE-NEXT: itt mi
1320 ; CHECK-BE-NEXT: ldrmi r0, [r0, #12]
1321 ; CHECK-BE-NEXT: vmovmi s7, r0
1322 ; CHECK-BE-NEXT: vrev64.32 q0, q1
1323 ; CHECK-BE-NEXT: add sp, #4
1324 ; CHECK-BE-NEXT: bx lr
1325 entry:
1326 %c = icmp sgt <4 x i32> %a, zeroinitializer
1327 %l = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %dest, i32 1, <4 x i1> %c, <4 x float> undef)
1328 ret <4 x float> %l
1329 }
1331 define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align4_other(<4 x float> *%dest, <4 x i32> %a, <4 x float> %b) {
1332 ; CHECK-LE-LABEL: masked_v4f32_align4_other:
1333 ; CHECK-LE: @ %bb.0: @ %entry
1334 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
1335 ; CHECK-LE-NEXT: vldrwt.u32 q0, [r0]
1336 ; CHECK-LE-NEXT: vpsel q0, q0, q1
1337 ; CHECK-LE-NEXT: bx lr
1339 ; CHECK-BE-LABEL: masked_v4f32_align4_other:
1340 ; CHECK-BE: @ %bb.0: @ %entry
1341 ; CHECK-BE-NEXT: vrev64.32 q2, q1
1342 ; CHECK-BE-NEXT: vrev64.32 q1, q0
1343 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
1344 ; CHECK-BE-NEXT: vldrwt.u32 q0, [r0]
1345 ; CHECK-BE-NEXT: vpsel q1, q0, q2
1346 ; CHECK-BE-NEXT: vrev64.32 q0, q1
1347 ; CHECK-BE-NEXT: bx lr
1348 entry:
1349 %c = icmp sgt <4 x i32> %a, zeroinitializer
1350 %l = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %dest, i32 4, <4 x i1> %c, <4 x float> %b)
1351 ret <4 x float> %l
1352 }
1354 define arm_aapcs_vfpcc i8* @masked_v4f32_preinc(i8* %x, i8* %y, <4 x i32> %a) {
1355 ; CHECK-LE-LABEL: masked_v4f32_preinc:
1356 ; CHECK-LE: @ %bb.0: @ %entry
1357 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
1358 ; CHECK-LE-NEXT: vldrwt.u32 q0, [r0, #4]
1359 ; CHECK-LE-NEXT: adds r0, #4
1360 ; CHECK-LE-NEXT: vstrw.32 q0, [r1]
1361 ; CHECK-LE-NEXT: bx lr
1363 ; CHECK-BE-LABEL: masked_v4f32_preinc:
1364 ; CHECK-BE: @ %bb.0: @ %entry
1365 ; CHECK-BE-NEXT: vrev64.32 q1, q0
1366 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
1367 ; CHECK-BE-NEXT: vldrwt.u32 q0, [r0, #4]
1368 ; CHECK-BE-NEXT: adds r0, #4
1369 ; CHECK-BE-NEXT: vstrw.32 q0, [r1]
1370 ; CHECK-BE-NEXT: bx lr
1371 entry:
1372 %z = getelementptr inbounds i8, i8* %x, i32 4
1373 %0 = bitcast i8* %z to <4 x float>*
1374 %c = icmp sgt <4 x i32> %a, zeroinitializer
1375 %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
1376 %2 = bitcast i8* %y to <4 x float>*
1377 store <4 x float> %1, <4 x float>* %2, align 4
1378 ret i8* %z
1379 }
1381 define arm_aapcs_vfpcc i8* @masked_v4f32_postinc(i8* %x, i8* %y, <4 x i32> %a) {
1382 ; CHECK-LE-LABEL: masked_v4f32_postinc:
1383 ; CHECK-LE: @ %bb.0: @ %entry
1384 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
1385 ; CHECK-LE-NEXT: vldrwt.u32 q0, [r0]
1386 ; CHECK-LE-NEXT: adds r0, #4
1387 ; CHECK-LE-NEXT: vstrw.32 q0, [r1]
1388 ; CHECK-LE-NEXT: bx lr
1390 ; CHECK-BE-LABEL: masked_v4f32_postinc:
1391 ; CHECK-BE: @ %bb.0: @ %entry
1392 ; CHECK-BE-NEXT: vrev64.32 q1, q0
1393 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
1394 ; CHECK-BE-NEXT: vldrwt.u32 q0, [r0]
1395 ; CHECK-BE-NEXT: adds r0, #4
1396 ; CHECK-BE-NEXT: vstrw.32 q0, [r1]
1397 ; CHECK-BE-NEXT: bx lr
1398 entry:
1399 %z = getelementptr inbounds i8, i8* %x, i32 4
1400 %0 = bitcast i8* %x to <4 x float>*
1401 %c = icmp sgt <4 x i32> %a, zeroinitializer
1402 %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
1403 %2 = bitcast i8* %y to <4 x float>*
1404 store <4 x float> %1, <4 x float>* %2, align 4
1405 ret i8* %z
1406 }
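; v8f16 masked loads: the aligned cases use a predicated vldrh, while the align1 case is
; expanded into per-lane conditional half-precision loads assembled through the stack.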
1409 define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align4_zero(<8 x half> *%dest, <8 x i16> %a) {
1410 ; CHECK-LE-LABEL: masked_v8f16_align4_zero:
1411 ; CHECK-LE: @ %bb.0: @ %entry
1412 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
1413 ; CHECK-LE-NEXT: vldrht.u16 q0, [r0]
1414 ; CHECK-LE-NEXT: bx lr
1416 ; CHECK-BE-LABEL: masked_v8f16_align4_zero:
1417 ; CHECK-BE: @ %bb.0: @ %entry
1418 ; CHECK-BE-NEXT: vrev64.16 q1, q0
1419 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
1420 ; CHECK-BE-NEXT: vldrht.u16 q1, [r0]
1421 ; CHECK-BE-NEXT: vrev64.16 q0, q1
1422 ; CHECK-BE-NEXT: bx lr
1423 entry:
1424 %c = icmp sgt <8 x i16> %a, zeroinitializer
1425 %l = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %dest, i32 2, <8 x i1> %c, <8 x half> zeroinitializer)
1426 ret <8 x half> %l
1427 }
1429 define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align4_undef(<8 x half> *%dest, <8 x i16> %a) {
1430 ; CHECK-LE-LABEL: masked_v8f16_align4_undef:
1431 ; CHECK-LE: @ %bb.0: @ %entry
1432 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
1433 ; CHECK-LE-NEXT: vldrht.u16 q0, [r0]
1434 ; CHECK-LE-NEXT: bx lr
1436 ; CHECK-BE-LABEL: masked_v8f16_align4_undef:
1437 ; CHECK-BE: @ %bb.0: @ %entry
1438 ; CHECK-BE-NEXT: vrev64.16 q1, q0
1439 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
1440 ; CHECK-BE-NEXT: vldrht.u16 q1, [r0]
1441 ; CHECK-BE-NEXT: vrev64.16 q0, q1
1442 ; CHECK-BE-NEXT: bx lr
1443 entry:
1444 %c = icmp sgt <8 x i16> %a, zeroinitializer
1445 %l = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %dest, i32 2, <8 x i1> %c, <8 x half> undef)
1446 ret <8 x half> %l
1447 }
1449 define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align1_undef(<8 x half> *%dest, <8 x i16> %a) {
1450 ; CHECK-LE-LABEL: masked_v8f16_align1_undef:
1451 ; CHECK-LE: @ %bb.0: @ %entry
1452 ; CHECK-LE-NEXT: .pad #40
1453 ; CHECK-LE-NEXT: sub sp, #40
1454 ; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr
1455 ; CHECK-LE-NEXT: @ implicit-def: $q0
1456 ; CHECK-LE-NEXT: vmrs r1, p0
1457 ; CHECK-LE-NEXT: and r2, r1, #1
1458 ; CHECK-LE-NEXT: rsbs r3, r2, #0
1459 ; CHECK-LE-NEXT: movs r2, #0
1460 ; CHECK-LE-NEXT: bfi r2, r3, #0, #1
1461 ; CHECK-LE-NEXT: ubfx r3, r1, #2, #1
1462 ; CHECK-LE-NEXT: rsbs r3, r3, #0
1463 ; CHECK-LE-NEXT: bfi r2, r3, #1, #1
1464 ; CHECK-LE-NEXT: ubfx r3, r1, #4, #1
1465 ; CHECK-LE-NEXT: rsbs r3, r3, #0
1466 ; CHECK-LE-NEXT: bfi r2, r3, #2, #1
1467 ; CHECK-LE-NEXT: ubfx r3, r1, #6, #1
1468 ; CHECK-LE-NEXT: rsbs r3, r3, #0
1469 ; CHECK-LE-NEXT: bfi r2, r3, #3, #1
1470 ; CHECK-LE-NEXT: ubfx r3, r1, #8, #1
1471 ; CHECK-LE-NEXT: rsbs r3, r3, #0
1472 ; CHECK-LE-NEXT: bfi r2, r3, #4, #1
1473 ; CHECK-LE-NEXT: ubfx r3, r1, #10, #1
1474 ; CHECK-LE-NEXT: rsbs r3, r3, #0
1475 ; CHECK-LE-NEXT: bfi r2, r3, #5, #1
1476 ; CHECK-LE-NEXT: ubfx r3, r1, #12, #1
1477 ; CHECK-LE-NEXT: ubfx r1, r1, #14, #1
1478 ; CHECK-LE-NEXT: rsbs r3, r3, #0
1479 ; CHECK-LE-NEXT: bfi r2, r3, #6, #1
1480 ; CHECK-LE-NEXT: rsbs r1, r1, #0
1481 ; CHECK-LE-NEXT: bfi r2, r1, #7, #1
1482 ; CHECK-LE-NEXT: uxtb r1, r2
1483 ; CHECK-LE-NEXT: lsls r2, r2, #31
1484 ; CHECK-LE-NEXT: bne .LBB45_9
1485 ; CHECK-LE-NEXT: @ %bb.1: @ %else
1486 ; CHECK-LE-NEXT: lsls r2, r1, #30
1487 ; CHECK-LE-NEXT: bmi .LBB45_10
1488 ; CHECK-LE-NEXT: .LBB45_2: @ %else2
1489 ; CHECK-LE-NEXT: lsls r2, r1, #29
1490 ; CHECK-LE-NEXT: bmi .LBB45_11
1491 ; CHECK-LE-NEXT: .LBB45_3: @ %else5
1492 ; CHECK-LE-NEXT: lsls r2, r1, #28
1493 ; CHECK-LE-NEXT: bmi .LBB45_12
1494 ; CHECK-LE-NEXT: .LBB45_4: @ %else8
1495 ; CHECK-LE-NEXT: lsls r2, r1, #27
1496 ; CHECK-LE-NEXT: bmi .LBB45_13
1497 ; CHECK-LE-NEXT: .LBB45_5: @ %else11
1498 ; CHECK-LE-NEXT: lsls r2, r1, #26
1499 ; CHECK-LE-NEXT: bmi .LBB45_14
1500 ; CHECK-LE-NEXT: .LBB45_6: @ %else14
1501 ; CHECK-LE-NEXT: lsls r2, r1, #25
1502 ; CHECK-LE-NEXT: bmi .LBB45_15
1503 ; CHECK-LE-NEXT: .LBB45_7: @ %else17
1504 ; CHECK-LE-NEXT: lsls r1, r1, #24
1505 ; CHECK-LE-NEXT: bmi .LBB45_16
1506 ; CHECK-LE-NEXT: .LBB45_8: @ %else20
1507 ; CHECK-LE-NEXT: add sp, #40
1508 ; CHECK-LE-NEXT: bx lr
1509 ; CHECK-LE-NEXT: .LBB45_9: @ %cond.load
1510 ; CHECK-LE-NEXT: ldrh r2, [r0]
1511 ; CHECK-LE-NEXT: strh.w r2, [sp, #28]
1512 ; CHECK-LE-NEXT: vldr.16 s0, [sp, #28]
1513 ; CHECK-LE-NEXT: lsls r2, r1, #30
1514 ; CHECK-LE-NEXT: bpl .LBB45_2
1515 ; CHECK-LE-NEXT: .LBB45_10: @ %cond.load1
1516 ; CHECK-LE-NEXT: ldrh r2, [r0, #2]
1517 ; CHECK-LE-NEXT: strh.w r2, [sp, #24]
1518 ; CHECK-LE-NEXT: vldr.16 s4, [sp, #24]
1519 ; CHECK-LE-NEXT: vmov r2, s4
1520 ; CHECK-LE-NEXT: vmov.16 q0[1], r2
1521 ; CHECK-LE-NEXT: lsls r2, r1, #29
1522 ; CHECK-LE-NEXT: bpl .LBB45_3
1523 ; CHECK-LE-NEXT: .LBB45_11: @ %cond.load4
1524 ; CHECK-LE-NEXT: ldrh r2, [r0, #4]
1525 ; CHECK-LE-NEXT: strh.w r2, [sp, #20]
1526 ; CHECK-LE-NEXT: vldr.16 s4, [sp, #20]
1527 ; CHECK-LE-NEXT: vmov r2, s4
1528 ; CHECK-LE-NEXT: vmov.16 q0[2], r2
1529 ; CHECK-LE-NEXT: lsls r2, r1, #28
1530 ; CHECK-LE-NEXT: bpl .LBB45_4
1531 ; CHECK-LE-NEXT: .LBB45_12: @ %cond.load7
1532 ; CHECK-LE-NEXT: ldrh r2, [r0, #6]
1533 ; CHECK-LE-NEXT: strh.w r2, [sp, #16]
1534 ; CHECK-LE-NEXT: vldr.16 s4, [sp, #16]
1535 ; CHECK-LE-NEXT: vmov r2, s4
1536 ; CHECK-LE-NEXT: vmov.16 q0[3], r2
1537 ; CHECK-LE-NEXT: lsls r2, r1, #27
1538 ; CHECK-LE-NEXT: bpl .LBB45_5
1539 ; CHECK-LE-NEXT: .LBB45_13: @ %cond.load10
1540 ; CHECK-LE-NEXT: ldrh r2, [r0, #8]
1541 ; CHECK-LE-NEXT: strh.w r2, [sp, #12]
1542 ; CHECK-LE-NEXT: vldr.16 s4, [sp, #12]
1543 ; CHECK-LE-NEXT: vmov r2, s4
1544 ; CHECK-LE-NEXT: vmov.16 q0[4], r2
1545 ; CHECK-LE-NEXT: lsls r2, r1, #26
1546 ; CHECK-LE-NEXT: bpl .LBB45_6
1547 ; CHECK-LE-NEXT: .LBB45_14: @ %cond.load13
1548 ; CHECK-LE-NEXT: ldrh r2, [r0, #10]
1549 ; CHECK-LE-NEXT: strh.w r2, [sp, #8]
1550 ; CHECK-LE-NEXT: vldr.16 s4, [sp, #8]
1551 ; CHECK-LE-NEXT: vmov r2, s4
1552 ; CHECK-LE-NEXT: vmov.16 q0[5], r2
1553 ; CHECK-LE-NEXT: lsls r2, r1, #25
1554 ; CHECK-LE-NEXT: bpl .LBB45_7
1555 ; CHECK-LE-NEXT: .LBB45_15: @ %cond.load16
1556 ; CHECK-LE-NEXT: ldrh r2, [r0, #12]
1557 ; CHECK-LE-NEXT: strh.w r2, [sp, #4]
1558 ; CHECK-LE-NEXT: vldr.16 s4, [sp, #4]
1559 ; CHECK-LE-NEXT: vmov r2, s4
1560 ; CHECK-LE-NEXT: vmov.16 q0[6], r2
1561 ; CHECK-LE-NEXT: lsls r1, r1, #24
1562 ; CHECK-LE-NEXT: bpl .LBB45_8
1563 ; CHECK-LE-NEXT: .LBB45_16: @ %cond.load19
1564 ; CHECK-LE-NEXT: ldrh r0, [r0, #14]
1565 ; CHECK-LE-NEXT: strh.w r0, [sp]
1566 ; CHECK-LE-NEXT: vldr.16 s4, [sp]
1567 ; CHECK-LE-NEXT: vmov r0, s4
1568 ; CHECK-LE-NEXT: vmov.16 q0[7], r0
1569 ; CHECK-LE-NEXT: add sp, #40
1570 ; CHECK-LE-NEXT: bx lr
1572 ; CHECK-BE-LABEL: masked_v8f16_align1_undef:
1573 ; CHECK-BE: @ %bb.0: @ %entry
1574 ; CHECK-BE-NEXT: .pad #40
1575 ; CHECK-BE-NEXT: sub sp, #40
1576 ; CHECK-BE-NEXT: vrev64.16 q1, q0
1577 ; CHECK-BE-NEXT: vcmp.s16 gt, q1, zr
1578 ; CHECK-BE-NEXT: @ implicit-def: $q1
1579 ; CHECK-BE-NEXT: vmrs r1, p0
1580 ; CHECK-BE-NEXT: and r2, r1, #1
1581 ; CHECK-BE-NEXT: rsbs r3, r2, #0
1582 ; CHECK-BE-NEXT: movs r2, #0
1583 ; CHECK-BE-NEXT: bfi r2, r3, #0, #1
1584 ; CHECK-BE-NEXT: ubfx r3, r1, #2, #1
1585 ; CHECK-BE-NEXT: rsbs r3, r3, #0
1586 ; CHECK-BE-NEXT: bfi r2, r3, #1, #1
1587 ; CHECK-BE-NEXT: ubfx r3, r1, #4, #1
1588 ; CHECK-BE-NEXT: rsbs r3, r3, #0
1589 ; CHECK-BE-NEXT: bfi r2, r3, #2, #1
1590 ; CHECK-BE-NEXT: ubfx r3, r1, #6, #1
1591 ; CHECK-BE-NEXT: rsbs r3, r3, #0
1592 ; CHECK-BE-NEXT: bfi r2, r3, #3, #1
1593 ; CHECK-BE-NEXT: ubfx r3, r1, #8, #1
1594 ; CHECK-BE-NEXT: rsbs r3, r3, #0
1595 ; CHECK-BE-NEXT: bfi r2, r3, #4, #1
1596 ; CHECK-BE-NEXT: ubfx r3, r1, #10, #1
1597 ; CHECK-BE-NEXT: rsbs r3, r3, #0
1598 ; CHECK-BE-NEXT: bfi r2, r3, #5, #1
1599 ; CHECK-BE-NEXT: ubfx r3, r1, #12, #1
1600 ; CHECK-BE-NEXT: ubfx r1, r1, #14, #1
1601 ; CHECK-BE-NEXT: rsbs r3, r3, #0
1602 ; CHECK-BE-NEXT: bfi r2, r3, #6, #1
1603 ; CHECK-BE-NEXT: rsbs r1, r1, #0
1604 ; CHECK-BE-NEXT: bfi r2, r1, #7, #1
1605 ; CHECK-BE-NEXT: uxtb r1, r2
1606 ; CHECK-BE-NEXT: lsls r2, r2, #31
1607 ; CHECK-BE-NEXT: bne .LBB45_10
1608 ; CHECK-BE-NEXT: @ %bb.1: @ %else
1609 ; CHECK-BE-NEXT: lsls r2, r1, #30
1610 ; CHECK-BE-NEXT: bmi .LBB45_11
1611 ; CHECK-BE-NEXT: .LBB45_2: @ %else2
1612 ; CHECK-BE-NEXT: lsls r2, r1, #29
1613 ; CHECK-BE-NEXT: bmi .LBB45_12
1614 ; CHECK-BE-NEXT: .LBB45_3: @ %else5
1615 ; CHECK-BE-NEXT: lsls r2, r1, #28
1616 ; CHECK-BE-NEXT: bmi .LBB45_13
1617 ; CHECK-BE-NEXT: .LBB45_4: @ %else8
1618 ; CHECK-BE-NEXT: lsls r2, r1, #27
1619 ; CHECK-BE-NEXT: bmi .LBB45_14
1620 ; CHECK-BE-NEXT: .LBB45_5: @ %else11
1621 ; CHECK-BE-NEXT: lsls r2, r1, #26
1622 ; CHECK-BE-NEXT: bmi .LBB45_15
1623 ; CHECK-BE-NEXT: .LBB45_6: @ %else14
1624 ; CHECK-BE-NEXT: lsls r2, r1, #25
1625 ; CHECK-BE-NEXT: bmi .LBB45_16
1626 ; CHECK-BE-NEXT: .LBB45_7: @ %else17
1627 ; CHECK-BE-NEXT: lsls r1, r1, #24
1628 ; CHECK-BE-NEXT: bpl .LBB45_9
1629 ; CHECK-BE-NEXT: .LBB45_8: @ %cond.load19
1630 ; CHECK-BE-NEXT: ldrh r0, [r0, #14]
1631 ; CHECK-BE-NEXT: strh.w r0, [sp]
1632 ; CHECK-BE-NEXT: vldr.16 s0, [sp]
1633 ; CHECK-BE-NEXT: vmov r0, s0
1634 ; CHECK-BE-NEXT: vmov.16 q1[7], r0
1635 ; CHECK-BE-NEXT: .LBB45_9: @ %else20
1636 ; CHECK-BE-NEXT: vrev64.16 q0, q1
1637 ; CHECK-BE-NEXT: add sp, #40
1638 ; CHECK-BE-NEXT: bx lr
1639 ; CHECK-BE-NEXT: .LBB45_10: @ %cond.load
1640 ; CHECK-BE-NEXT: ldrh r2, [r0]
1641 ; CHECK-BE-NEXT: strh.w r2, [sp, #28]
1642 ; CHECK-BE-NEXT: vldr.16 s4, [sp, #28]
1643 ; CHECK-BE-NEXT: lsls r2, r1, #30
1644 ; CHECK-BE-NEXT: bpl .LBB45_2
1645 ; CHECK-BE-NEXT: .LBB45_11: @ %cond.load1
1646 ; CHECK-BE-NEXT: ldrh r2, [r0, #2]
1647 ; CHECK-BE-NEXT: strh.w r2, [sp, #24]
1648 ; CHECK-BE-NEXT: vldr.16 s0, [sp, #24]
1649 ; CHECK-BE-NEXT: vmov r2, s0
1650 ; CHECK-BE-NEXT: vmov.16 q1[1], r2
1651 ; CHECK-BE-NEXT: lsls r2, r1, #29
1652 ; CHECK-BE-NEXT: bpl .LBB45_3
1653 ; CHECK-BE-NEXT: .LBB45_12: @ %cond.load4
1654 ; CHECK-BE-NEXT: ldrh r2, [r0, #4]
1655 ; CHECK-BE-NEXT: strh.w r2, [sp, #20]
1656 ; CHECK-BE-NEXT: vldr.16 s0, [sp, #20]
1657 ; CHECK-BE-NEXT: vmov r2, s0
1658 ; CHECK-BE-NEXT: vmov.16 q1[2], r2
1659 ; CHECK-BE-NEXT: lsls r2, r1, #28
1660 ; CHECK-BE-NEXT: bpl .LBB45_4
1661 ; CHECK-BE-NEXT: .LBB45_13: @ %cond.load7
1662 ; CHECK-BE-NEXT: ldrh r2, [r0, #6]
1663 ; CHECK-BE-NEXT: strh.w r2, [sp, #16]
1664 ; CHECK-BE-NEXT: vldr.16 s0, [sp, #16]
1665 ; CHECK-BE-NEXT: vmov r2, s0
1666 ; CHECK-BE-NEXT: vmov.16 q1[3], r2
1667 ; CHECK-BE-NEXT: lsls r2, r1, #27
1668 ; CHECK-BE-NEXT: bpl .LBB45_5
1669 ; CHECK-BE-NEXT: .LBB45_14: @ %cond.load10
1670 ; CHECK-BE-NEXT: ldrh r2, [r0, #8]
1671 ; CHECK-BE-NEXT: strh.w r2, [sp, #12]
1672 ; CHECK-BE-NEXT: vldr.16 s0, [sp, #12]
1673 ; CHECK-BE-NEXT: vmov r2, s0
1674 ; CHECK-BE-NEXT: vmov.16 q1[4], r2
1675 ; CHECK-BE-NEXT: lsls r2, r1, #26
1676 ; CHECK-BE-NEXT: bpl .LBB45_6
1677 ; CHECK-BE-NEXT: .LBB45_15: @ %cond.load13
1678 ; CHECK-BE-NEXT: ldrh r2, [r0, #10]
1679 ; CHECK-BE-NEXT: strh.w r2, [sp, #8]
1680 ; CHECK-BE-NEXT: vldr.16 s0, [sp, #8]
1681 ; CHECK-BE-NEXT: vmov r2, s0
1682 ; CHECK-BE-NEXT: vmov.16 q1[5], r2
1683 ; CHECK-BE-NEXT: lsls r2, r1, #25
1684 ; CHECK-BE-NEXT: bpl .LBB45_7
1685 ; CHECK-BE-NEXT: .LBB45_16: @ %cond.load16
1686 ; CHECK-BE-NEXT: ldrh r2, [r0, #12]
1687 ; CHECK-BE-NEXT: strh.w r2, [sp, #4]
1688 ; CHECK-BE-NEXT: vldr.16 s0, [sp, #4]
1689 ; CHECK-BE-NEXT: vmov r2, s0
1690 ; CHECK-BE-NEXT: vmov.16 q1[6], r2
1691 ; CHECK-BE-NEXT: lsls r1, r1, #24
1692 ; CHECK-BE-NEXT: bmi .LBB45_8
1693 ; CHECK-BE-NEXT: b .LBB45_9
1694 entry:
1695 %c = icmp sgt <8 x i16> %a, zeroinitializer
1696 %l = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %dest, i32 1, <8 x i1> %c, <8 x half> undef)
1697 ret <8 x half> %l
1698 }
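; With a real passthru vector (%b) rather than undef or zero, the checks below
; expect the predicated load to be followed by a vpsel selecting between the
; loaded lanes and %b.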
1700 define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align4_other(<8 x half> *%dest, <8 x i16> %a, <8 x half> %b) {
1701 ; CHECK-LE-LABEL: masked_v8f16_align4_other:
1702 ; CHECK-LE: @ %bb.0: @ %entry
1703 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
1704 ; CHECK-LE-NEXT: vldrht.u16 q0, [r0]
1705 ; CHECK-LE-NEXT: vpsel q0, q0, q1
1706 ; CHECK-LE-NEXT: bx lr
1708 ; CHECK-BE-LABEL: masked_v8f16_align4_other:
1709 ; CHECK-BE: @ %bb.0: @ %entry
1710 ; CHECK-BE-NEXT: vrev64.16 q2, q1
1711 ; CHECK-BE-NEXT: vrev64.16 q1, q0
1712 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
1713 ; CHECK-BE-NEXT: vldrht.u16 q0, [r0]
1714 ; CHECK-BE-NEXT: vpsel q1, q0, q2
1715 ; CHECK-BE-NEXT: vrev64.16 q0, q1
1716 ; CHECK-BE-NEXT: bx lr
1717 entry:
1718 %c = icmp sgt <8 x i16> %a, zeroinitializer
1719 %l = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %dest, i32 2, <8 x i1> %c, <8 x half> %b)
1720 ret <8 x half> %l
1721 }
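; The pre/post-increment tests below check that the updated pointer is still
; returned in r0 (adds r0, #4) while the predicated load itself uses either an
; immediate offset or the unmodified base pointer.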
1723 define arm_aapcs_vfpcc i8* @masked_v8f16_preinc(i8* %x, i8* %y, <8 x i16> %a) {
1724 ; CHECK-LE-LABEL: masked_v8f16_preinc:
1725 ; CHECK-LE: @ %bb.0: @ %entry
1726 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
1727 ; CHECK-LE-NEXT: vldrht.u16 q0, [r0, #4]
1728 ; CHECK-LE-NEXT: adds r0, #4
1729 ; CHECK-LE-NEXT: vstrw.32 q0, [r1]
1730 ; CHECK-LE-NEXT: bx lr
1732 ; CHECK-BE-LABEL: masked_v8f16_preinc:
1733 ; CHECK-BE: @ %bb.0: @ %entry
1734 ; CHECK-BE-NEXT: vrev64.16 q1, q0
1735 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
1736 ; CHECK-BE-NEXT: vldrht.u16 q0, [r0, #4]
1737 ; CHECK-BE-NEXT: adds r0, #4
1738 ; CHECK-BE-NEXT: vstrh.16 q0, [r1]
1739 ; CHECK-BE-NEXT: bx lr
1740 entry:
1741 %z = getelementptr inbounds i8, i8* %x, i32 4
1742 %0 = bitcast i8* %z to <8 x half>*
1743 %c = icmp sgt <8 x i16> %a, zeroinitializer
1744 %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 4, <8 x i1> %c, <8 x half> undef)
1745 %2 = bitcast i8* %y to <8 x half>*
1746 store <8 x half> %1, <8 x half>* %2, align 4
1747 ret i8* %z
1748 }
1750 define arm_aapcs_vfpcc i8* @masked_v8f16_postinc(i8* %x, i8* %y, <8 x i16> %a) {
1751 ; CHECK-LE-LABEL: masked_v8f16_postinc:
1752 ; CHECK-LE: @ %bb.0: @ %entry
1753 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
1754 ; CHECK-LE-NEXT: vldrht.u16 q0, [r0]
1755 ; CHECK-LE-NEXT: adds r0, #4
1756 ; CHECK-LE-NEXT: vstrw.32 q0, [r1]
1757 ; CHECK-LE-NEXT: bx lr
1759 ; CHECK-BE-LABEL: masked_v8f16_postinc:
1760 ; CHECK-BE: @ %bb.0: @ %entry
1761 ; CHECK-BE-NEXT: vrev64.16 q1, q0
1762 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
1763 ; CHECK-BE-NEXT: vldrht.u16 q0, [r0]
1764 ; CHECK-BE-NEXT: adds r0, #4
1765 ; CHECK-BE-NEXT: vstrh.16 q0, [r1]
1766 ; CHECK-BE-NEXT: bx lr
1767 entry:
1768 %z = getelementptr inbounds i8, i8* %x, i32 4
1769 %0 = bitcast i8* %x to <8 x half>*
1770 %c = icmp sgt <8 x i16> %a, zeroinitializer
1771 %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 4, <8 x i1> %c, <8 x half> undef)
1772 %2 = bitcast i8* %y to <8 x half>*
1773 store <8 x half> %1, <8 x half>* %2, align 4
1774 ret i8* %z
1775 }
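; MVE has no predicated vector load with 64-bit elements, so the <2 x i64> and
; <2 x double> cases below are expected to be expanded into compare-and-branch
; code that conditionally loads each doubleword with an ordinary vldr.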
1778 define arm_aapcs_vfpcc <2 x i64> @masked_v2i64_align4_zero(<2 x i64> *%dest, <2 x i64> %a) {
1779 ; CHECK-LE-LABEL: masked_v2i64_align4_zero:
1780 ; CHECK-LE: @ %bb.0: @ %entry
1781 ; CHECK-LE-NEXT: .pad #4
1782 ; CHECK-LE-NEXT: sub sp, #4
1783 ; CHECK-LE-NEXT: vmov r3, s0
1784 ; CHECK-LE-NEXT: movs r2, #0
1785 ; CHECK-LE-NEXT: vmov r1, s1
1786 ; CHECK-LE-NEXT: vmov r12, s3
1787 ; CHECK-LE-NEXT: rsbs r3, r3, #0
1788 ; CHECK-LE-NEXT: vmov r3, s2
1789 ; CHECK-LE-NEXT: sbcs.w r1, r2, r1
1790 ; CHECK-LE-NEXT: mov.w r1, #0
1791 ; CHECK-LE-NEXT: it lt
1792 ; CHECK-LE-NEXT: movlt r1, #1
1793 ; CHECK-LE-NEXT: rsbs r3, r3, #0
1794 ; CHECK-LE-NEXT: sbcs.w r3, r2, r12
1795 ; CHECK-LE-NEXT: it lt
1796 ; CHECK-LE-NEXT: movlt r2, #1
1797 ; CHECK-LE-NEXT: cmp r2, #0
1798 ; CHECK-LE-NEXT: it ne
1799 ; CHECK-LE-NEXT: mvnne r2, #1
1800 ; CHECK-LE-NEXT: bfi r2, r1, #0, #1
1801 ; CHECK-LE-NEXT: and r1, r2, #3
1802 ; CHECK-LE-NEXT: lsls r2, r2, #31
1803 ; CHECK-LE-NEXT: beq .LBB49_2
1804 ; CHECK-LE-NEXT: @ %bb.1: @ %cond.load
1805 ; CHECK-LE-NEXT: vldr d1, .LCPI49_0
1806 ; CHECK-LE-NEXT: vldr d0, [r0]
1807 ; CHECK-LE-NEXT: b .LBB49_3
1808 ; CHECK-LE-NEXT: .LBB49_2:
1809 ; CHECK-LE-NEXT: vmov.i32 q0, #0x0
1810 ; CHECK-LE-NEXT: .LBB49_3: @ %else
1811 ; CHECK-LE-NEXT: lsls r1, r1, #30
1812 ; CHECK-LE-NEXT: it mi
1813 ; CHECK-LE-NEXT: vldrmi d1, [r0, #8]
1814 ; CHECK-LE-NEXT: add sp, #4
1815 ; CHECK-LE-NEXT: bx lr
1816 ; CHECK-LE-NEXT: .p2align 3
1817 ; CHECK-LE-NEXT: @ %bb.4:
1818 ; CHECK-LE-NEXT: .LCPI49_0:
1819 ; CHECK-LE-NEXT: .long 0 @ double 0
1820 ; CHECK-LE-NEXT: .long 0
1822 ; CHECK-BE-LABEL: masked_v2i64_align4_zero:
1823 ; CHECK-BE: @ %bb.0: @ %entry
1824 ; CHECK-BE-NEXT: .pad #4
1825 ; CHECK-BE-NEXT: sub sp, #4
1826 ; CHECK-BE-NEXT: vrev64.32 q1, q0
1827 ; CHECK-BE-NEXT: movs r2, #0
1828 ; CHECK-BE-NEXT: vmov r3, s7
1829 ; CHECK-BE-NEXT: vmov r1, s6
1830 ; CHECK-BE-NEXT: vmov r12, s4
1831 ; CHECK-BE-NEXT: rsbs r3, r3, #0
1832 ; CHECK-BE-NEXT: vmov r3, s5
1833 ; CHECK-BE-NEXT: sbcs.w r1, r2, r1
1834 ; CHECK-BE-NEXT: mov.w r1, #0
1835 ; CHECK-BE-NEXT: it lt
1836 ; CHECK-BE-NEXT: movlt r1, #1
1837 ; CHECK-BE-NEXT: rsbs r3, r3, #0
1838 ; CHECK-BE-NEXT: sbcs.w r3, r2, r12
1839 ; CHECK-BE-NEXT: it lt
1840 ; CHECK-BE-NEXT: movlt r2, #1
1841 ; CHECK-BE-NEXT: cmp r2, #0
1842 ; CHECK-BE-NEXT: it ne
1843 ; CHECK-BE-NEXT: mvnne r2, #1
1844 ; CHECK-BE-NEXT: bfi r2, r1, #0, #1
1845 ; CHECK-BE-NEXT: and r1, r2, #3
1846 ; CHECK-BE-NEXT: lsls r2, r2, #31
1847 ; CHECK-BE-NEXT: beq .LBB49_2
1848 ; CHECK-BE-NEXT: @ %bb.1: @ %cond.load
1849 ; CHECK-BE-NEXT: vldr d1, .LCPI49_0
1850 ; CHECK-BE-NEXT: vldr d0, [r0]
1851 ; CHECK-BE-NEXT: b .LBB49_3
1852 ; CHECK-BE-NEXT: .LBB49_2:
1853 ; CHECK-BE-NEXT: vmov.i32 q1, #0x0
1854 ; CHECK-BE-NEXT: vrev64.32 q0, q1
1855 ; CHECK-BE-NEXT: .LBB49_3: @ %else
1856 ; CHECK-BE-NEXT: lsls r1, r1, #30
1857 ; CHECK-BE-NEXT: it mi
1858 ; CHECK-BE-NEXT: vldrmi d1, [r0, #8]
1859 ; CHECK-BE-NEXT: add sp, #4
1860 ; CHECK-BE-NEXT: bx lr
1861 ; CHECK-BE-NEXT: .p2align 3
1862 ; CHECK-BE-NEXT: @ %bb.4:
1863 ; CHECK-BE-NEXT: .LCPI49_0:
1864 ; CHECK-BE-NEXT: .long 0 @ double 0
1865 ; CHECK-BE-NEXT: .long 0
1866 entry:
1867 %c = icmp sgt <2 x i64> %a, zeroinitializer
1868 %l = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %dest, i32 8, <2 x i1> %c, <2 x i64> zeroinitializer)
1869 ret <2 x i64> %l
1870 }
1872 define arm_aapcs_vfpcc <2 x double> @masked_v2f64_align4_zero(<2 x double> *%dest, <2 x double> %a, <2 x i64> %b) {
1873 ; CHECK-LE-LABEL: masked_v2f64_align4_zero:
1874 ; CHECK-LE: @ %bb.0: @ %entry
1875 ; CHECK-LE-NEXT: .pad #4
1876 ; CHECK-LE-NEXT: sub sp, #4
1877 ; CHECK-LE-NEXT: vmov r3, s4
1878 ; CHECK-LE-NEXT: movs r2, #0
1879 ; CHECK-LE-NEXT: vmov r1, s5
1880 ; CHECK-LE-NEXT: vmov r12, s7
1881 ; CHECK-LE-NEXT: rsbs r3, r3, #0
1882 ; CHECK-LE-NEXT: vmov r3, s6
1883 ; CHECK-LE-NEXT: sbcs.w r1, r2, r1
1884 ; CHECK-LE-NEXT: mov.w r1, #0
1885 ; CHECK-LE-NEXT: it lt
1886 ; CHECK-LE-NEXT: movlt r1, #1
1887 ; CHECK-LE-NEXT: rsbs r3, r3, #0
1888 ; CHECK-LE-NEXT: sbcs.w r3, r2, r12
1889 ; CHECK-LE-NEXT: it lt
1890 ; CHECK-LE-NEXT: movlt r2, #1
1891 ; CHECK-LE-NEXT: cmp r2, #0
1892 ; CHECK-LE-NEXT: it ne
1893 ; CHECK-LE-NEXT: mvnne r2, #1
1894 ; CHECK-LE-NEXT: bfi r2, r1, #0, #1
1895 ; CHECK-LE-NEXT: and r1, r2, #3
1896 ; CHECK-LE-NEXT: lsls r2, r2, #31
1897 ; CHECK-LE-NEXT: beq .LBB50_2
1898 ; CHECK-LE-NEXT: @ %bb.1: @ %cond.load
1899 ; CHECK-LE-NEXT: vldr d1, .LCPI50_0
1900 ; CHECK-LE-NEXT: vldr d0, [r0]
1901 ; CHECK-LE-NEXT: b .LBB50_3
1902 ; CHECK-LE-NEXT: .LBB50_2:
1903 ; CHECK-LE-NEXT: vmov.i32 q0, #0x0
1904 ; CHECK-LE-NEXT: .LBB50_3: @ %else
1905 ; CHECK-LE-NEXT: lsls r1, r1, #30
1906 ; CHECK-LE-NEXT: it mi
1907 ; CHECK-LE-NEXT: vldrmi d1, [r0, #8]
1908 ; CHECK-LE-NEXT: add sp, #4
1909 ; CHECK-LE-NEXT: bx lr
1910 ; CHECK-LE-NEXT: .p2align 3
1911 ; CHECK-LE-NEXT: @ %bb.4:
1912 ; CHECK-LE-NEXT: .LCPI50_0:
1913 ; CHECK-LE-NEXT: .long 0 @ double 0
1914 ; CHECK-LE-NEXT: .long 0
1916 ; CHECK-BE-LABEL: masked_v2f64_align4_zero:
1917 ; CHECK-BE: @ %bb.0: @ %entry
1918 ; CHECK-BE-NEXT: .pad #4
1919 ; CHECK-BE-NEXT: sub sp, #4
1920 ; CHECK-BE-NEXT: vrev64.32 q0, q1
1921 ; CHECK-BE-NEXT: movs r2, #0
1922 ; CHECK-BE-NEXT: vmov r3, s3
1923 ; CHECK-BE-NEXT: vmov r1, s2
1924 ; CHECK-BE-NEXT: vmov r12, s0
1925 ; CHECK-BE-NEXT: rsbs r3, r3, #0
1926 ; CHECK-BE-NEXT: vmov r3, s1
1927 ; CHECK-BE-NEXT: sbcs.w r1, r2, r1
1928 ; CHECK-BE-NEXT: mov.w r1, #0
1929 ; CHECK-BE-NEXT: it lt
1930 ; CHECK-BE-NEXT: movlt r1, #1
1931 ; CHECK-BE-NEXT: rsbs r3, r3, #0
1932 ; CHECK-BE-NEXT: sbcs.w r3, r2, r12
1933 ; CHECK-BE-NEXT: it lt
1934 ; CHECK-BE-NEXT: movlt r2, #1
1935 ; CHECK-BE-NEXT: cmp r2, #0
1936 ; CHECK-BE-NEXT: it ne
1937 ; CHECK-BE-NEXT: mvnne r2, #1
1938 ; CHECK-BE-NEXT: bfi r2, r1, #0, #1
1939 ; CHECK-BE-NEXT: and r1, r2, #3
1940 ; CHECK-BE-NEXT: lsls r2, r2, #31
1941 ; CHECK-BE-NEXT: beq .LBB50_2
1942 ; CHECK-BE-NEXT: @ %bb.1: @ %cond.load
1943 ; CHECK-BE-NEXT: vldr d1, .LCPI50_0
1944 ; CHECK-BE-NEXT: vldr d0, [r0]
1945 ; CHECK-BE-NEXT: b .LBB50_3
1946 ; CHECK-BE-NEXT: .LBB50_2:
1947 ; CHECK-BE-NEXT: vmov.i32 q1, #0x0
1948 ; CHECK-BE-NEXT: vrev64.32 q0, q1
1949 ; CHECK-BE-NEXT: .LBB50_3: @ %else
1950 ; CHECK-BE-NEXT: lsls r1, r1, #30
1951 ; CHECK-BE-NEXT: it mi
1952 ; CHECK-BE-NEXT: vldrmi d1, [r0, #8]
1953 ; CHECK-BE-NEXT: add sp, #4
1954 ; CHECK-BE-NEXT: bx lr
1955 ; CHECK-BE-NEXT: .p2align 3
1956 ; CHECK-BE-NEXT: @ %bb.4:
1957 ; CHECK-BE-NEXT: .LCPI50_0:
1958 ; CHECK-BE-NEXT: .long 0 @ double 0
1959 ; CHECK-BE-NEXT: .long 0
1960 entry:
1961 %c = icmp sgt <2 x i64> %b, zeroinitializer
1962 %l = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %dest, i32 8, <2 x i1> %c, <2 x double> zeroinitializer)
1963 ret <2 x double> %l
1964 }
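; The anyext tests below check that a masked load of a narrower type can use
; the extending predicated loads (vldrht.u32, vldrbt.u32, vldrbt.u16) when the
; alignment allows it; the align-1 v4i16 case falls back to expanded code.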
1966 define arm_aapcs_vfpcc <4 x i16> @anyext_v4i16(<4 x i16> *%dest, <4 x i32> %a) {
1967 ; CHECK-LE-LABEL: anyext_v4i16:
1968 ; CHECK-LE: @ %bb.0: @ %entry
1969 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
1970 ; CHECK-LE-NEXT: vldrht.u32 q0, [r0]
1971 ; CHECK-LE-NEXT: bx lr
1973 ; CHECK-BE-LABEL: anyext_v4i16:
1974 ; CHECK-BE: @ %bb.0: @ %entry
1975 ; CHECK-BE-NEXT: vrev64.32 q1, q0
1976 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
1977 ; CHECK-BE-NEXT: vldrht.u32 q1, [r0]
1978 ; CHECK-BE-NEXT: vrev64.32 q0, q1
1979 ; CHECK-BE-NEXT: bx lr
1980 entry:
1981 %c = icmp sgt <4 x i32> %a, zeroinitializer
1982 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> zeroinitializer)
1983 ret <4 x i16> %l
1984 }
1986 define arm_aapcs_vfpcc <4 x i16> @anyext_v4i16_align1(<4 x i16> *%dest, <4 x i32> %a) {
1987 ; CHECK-LE-LABEL: anyext_v4i16_align1:
1988 ; CHECK-LE: @ %bb.0: @ %entry
1989 ; CHECK-LE-NEXT: .pad #4
1990 ; CHECK-LE-NEXT: sub sp, #4
1991 ; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr
1992 ; CHECK-LE-NEXT: mov.w r12, #0
1993 ; CHECK-LE-NEXT: vmrs r3, p0
1994 ; CHECK-LE-NEXT: and r1, r3, #1
1995 ; CHECK-LE-NEXT: rsbs r2, r1, #0
1996 ; CHECK-LE-NEXT: movs r1, #0
1997 ; CHECK-LE-NEXT: bfi r1, r2, #0, #1
1998 ; CHECK-LE-NEXT: ubfx r2, r3, #4, #1
1999 ; CHECK-LE-NEXT: rsbs r2, r2, #0
2000 ; CHECK-LE-NEXT: bfi r1, r2, #1, #1
2001 ; CHECK-LE-NEXT: ubfx r2, r3, #8, #1
2002 ; CHECK-LE-NEXT: rsbs r2, r2, #0
2003 ; CHECK-LE-NEXT: bfi r1, r2, #2, #1
2004 ; CHECK-LE-NEXT: ubfx r2, r3, #12, #1
2005 ; CHECK-LE-NEXT: rsbs r2, r2, #0
2006 ; CHECK-LE-NEXT: bfi r1, r2, #3, #1
2007 ; CHECK-LE-NEXT: lsls r2, r1, #31
2008 ; CHECK-LE-NEXT: beq .LBB52_2
2009 ; CHECK-LE-NEXT: @ %bb.1: @ %cond.load
2010 ; CHECK-LE-NEXT: ldrh r2, [r0]
2011 ; CHECK-LE-NEXT: vdup.32 q0, r12
2012 ; CHECK-LE-NEXT: vmov.32 q0[0], r2
2013 ; CHECK-LE-NEXT: b .LBB52_3
2014 ; CHECK-LE-NEXT: .LBB52_2:
2015 ; CHECK-LE-NEXT: vmov.i32 q0, #0x0
2016 ; CHECK-LE-NEXT: .LBB52_3: @ %else
2017 ; CHECK-LE-NEXT: lsls r2, r1, #30
2018 ; CHECK-LE-NEXT: itt mi
2019 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #2]
2020 ; CHECK-LE-NEXT: vmovmi.32 q0[1], r2
2021 ; CHECK-LE-NEXT: lsls r2, r1, #29
2022 ; CHECK-LE-NEXT: itt mi
2023 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #4]
2024 ; CHECK-LE-NEXT: vmovmi.32 q0[2], r2
2025 ; CHECK-LE-NEXT: lsls r1, r1, #28
2026 ; CHECK-LE-NEXT: itt mi
2027 ; CHECK-LE-NEXT: ldrhmi r0, [r0, #6]
2028 ; CHECK-LE-NEXT: vmovmi.32 q0[3], r0
2029 ; CHECK-LE-NEXT: add sp, #4
2030 ; CHECK-LE-NEXT: bx lr
2032 ; CHECK-BE-LABEL: anyext_v4i16_align1:
2033 ; CHECK-BE: @ %bb.0: @ %entry
2034 ; CHECK-BE-NEXT: .pad #4
2035 ; CHECK-BE-NEXT: sub sp, #4
2036 ; CHECK-BE-NEXT: vrev64.32 q1, q0
2037 ; CHECK-BE-NEXT: mov.w r12, #0
2038 ; CHECK-BE-NEXT: vcmp.s32 gt, q1, zr
2039 ; CHECK-BE-NEXT: vmrs r3, p0
2040 ; CHECK-BE-NEXT: and r1, r3, #1
2041 ; CHECK-BE-NEXT: rsbs r2, r1, #0
2042 ; CHECK-BE-NEXT: movs r1, #0
2043 ; CHECK-BE-NEXT: bfi r1, r2, #0, #1
2044 ; CHECK-BE-NEXT: ubfx r2, r3, #4, #1
2045 ; CHECK-BE-NEXT: rsbs r2, r2, #0
2046 ; CHECK-BE-NEXT: bfi r1, r2, #1, #1
2047 ; CHECK-BE-NEXT: ubfx r2, r3, #8, #1
2048 ; CHECK-BE-NEXT: rsbs r2, r2, #0
2049 ; CHECK-BE-NEXT: bfi r1, r2, #2, #1
2050 ; CHECK-BE-NEXT: ubfx r2, r3, #12, #1
2051 ; CHECK-BE-NEXT: rsbs r2, r2, #0
2052 ; CHECK-BE-NEXT: bfi r1, r2, #3, #1
2053 ; CHECK-BE-NEXT: lsls r2, r1, #31
2054 ; CHECK-BE-NEXT: beq .LBB52_2
2055 ; CHECK-BE-NEXT: @ %bb.1: @ %cond.load
2056 ; CHECK-BE-NEXT: ldrh r2, [r0]
2057 ; CHECK-BE-NEXT: vdup.32 q1, r12
2058 ; CHECK-BE-NEXT: vmov.32 q1[0], r2
2059 ; CHECK-BE-NEXT: b .LBB52_3
2060 ; CHECK-BE-NEXT: .LBB52_2:
2061 ; CHECK-BE-NEXT: vmov.i32 q1, #0x0
2062 ; CHECK-BE-NEXT: .LBB52_3: @ %else
2063 ; CHECK-BE-NEXT: lsls r2, r1, #30
2064 ; CHECK-BE-NEXT: itt mi
2065 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #2]
2066 ; CHECK-BE-NEXT: vmovmi.32 q1[1], r2
2067 ; CHECK-BE-NEXT: lsls r2, r1, #29
2068 ; CHECK-BE-NEXT: itt mi
2069 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #4]
2070 ; CHECK-BE-NEXT: vmovmi.32 q1[2], r2
2071 ; CHECK-BE-NEXT: lsls r1, r1, #28
2072 ; CHECK-BE-NEXT: itt mi
2073 ; CHECK-BE-NEXT: ldrhmi r0, [r0, #6]
2074 ; CHECK-BE-NEXT: vmovmi.32 q1[3], r0
2075 ; CHECK-BE-NEXT: vrev64.32 q0, q1
2076 ; CHECK-BE-NEXT: add sp, #4
2077 ; CHECK-BE-NEXT: bx lr
2078 entry:
2079 %c = icmp sgt <4 x i32> %a, zeroinitializer
2080 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 1, <4 x i1> %c, <4 x i16> zeroinitializer)
2081 ret <4 x i16> %l
2082 }
2084 define arm_aapcs_vfpcc <4 x i8> @anyext_v4i8(<4 x i8> *%dest, <4 x i32> %a) {
2085 ; CHECK-LE-LABEL: anyext_v4i8:
2086 ; CHECK-LE: @ %bb.0: @ %entry
2087 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
2088 ; CHECK-LE-NEXT: vldrbt.u32 q0, [r0]
2089 ; CHECK-LE-NEXT: bx lr
2091 ; CHECK-BE-LABEL: anyext_v4i8:
2092 ; CHECK-BE: @ %bb.0: @ %entry
2093 ; CHECK-BE-NEXT: vrev64.32 q1, q0
2094 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
2095 ; CHECK-BE-NEXT: vldrbt.u32 q1, [r0]
2096 ; CHECK-BE-NEXT: vrev64.32 q0, q1
2097 ; CHECK-BE-NEXT: bx lr
2098 entry:
2099 %c = icmp sgt <4 x i32> %a, zeroinitializer
2100 %l = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %dest, i32 1, <4 x i1> %c, <4 x i8> zeroinitializer)
2101 ret <4 x i8> %l
2102 }
2104 define arm_aapcs_vfpcc <8 x i8> @anyext_v8i8(<8 x i8> *%dest, <8 x i16> %a) {
2105 ; CHECK-LE-LABEL: anyext_v8i8:
2106 ; CHECK-LE: @ %bb.0: @ %entry
2107 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
2108 ; CHECK-LE-NEXT: vldrbt.u16 q0, [r0]
2109 ; CHECK-LE-NEXT: bx lr
2111 ; CHECK-BE-LABEL: anyext_v8i8:
2112 ; CHECK-BE: @ %bb.0: @ %entry
2113 ; CHECK-BE-NEXT: vrev64.16 q1, q0
2114 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
2115 ; CHECK-BE-NEXT: vldrbt.u16 q1, [r0]
2116 ; CHECK-BE-NEXT: vrev64.16 q0, q1
2117 ; CHECK-BE-NEXT: bx lr
2118 entry:
2119 %c = icmp sgt <8 x i16> %a, zeroinitializer
2120 %l = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %dest, i32 1, <8 x i1> %c, <8 x i8> zeroinitializer)
2121 ret <8 x i8> %l
2122 }
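; The multi_user tests check that when the loaded <4 x i16> has another use
; (the call to @foo), the zero/sign extension stays a separate
; vmovlb.u16/vmovlb.s16 after the call rather than being folded into the load.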
2124 define arm_aapcs_vfpcc <4 x i32> @multi_user_zext(<4 x i16> *%dest, <4 x i32> %a) {
2125 ; CHECK-LE-LABEL: multi_user_zext:
2126 ; CHECK-LE: @ %bb.0: @ %entry
2127 ; CHECK-LE-NEXT: .save {r7, lr}
2128 ; CHECK-LE-NEXT: push {r7, lr}
2129 ; CHECK-LE-NEXT: .vsave {d8, d9}
2130 ; CHECK-LE-NEXT: vpush {d8, d9}
2131 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
2132 ; CHECK-LE-NEXT: vldrht.u32 q4, [r0]
2133 ; CHECK-LE-NEXT: vmov r0, r1, d8
2134 ; CHECK-LE-NEXT: vmov r2, r3, d9
2135 ; CHECK-LE-NEXT: bl foo
2136 ; CHECK-LE-NEXT: vmovlb.u16 q0, q4
2137 ; CHECK-LE-NEXT: vpop {d8, d9}
2138 ; CHECK-LE-NEXT: pop {r7, pc}
2140 ; CHECK-BE-LABEL: multi_user_zext:
2141 ; CHECK-BE: @ %bb.0: @ %entry
2142 ; CHECK-BE-NEXT: .save {r7, lr}
2143 ; CHECK-BE-NEXT: push {r7, lr}
2144 ; CHECK-BE-NEXT: .vsave {d8, d9}
2145 ; CHECK-BE-NEXT: vpush {d8, d9}
2146 ; CHECK-BE-NEXT: vrev64.32 q1, q0
2147 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
2148 ; CHECK-BE-NEXT: vldrht.u32 q4, [r0]
2149 ; CHECK-BE-NEXT: vrev64.32 q0, q4
2150 ; CHECK-BE-NEXT: vmov r1, r0, d0
2151 ; CHECK-BE-NEXT: vmov r3, r2, d1
2152 ; CHECK-BE-NEXT: bl foo
2153 ; CHECK-BE-NEXT: vmovlb.u16 q1, q4
2154 ; CHECK-BE-NEXT: vrev64.32 q0, q1
2155 ; CHECK-BE-NEXT: vpop {d8, d9}
2156 ; CHECK-BE-NEXT: pop {r7, pc}
2157 entry:
2158 %c = icmp sgt <4 x i32> %a, zeroinitializer
2159 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> zeroinitializer)
2160 call void @foo(<4 x i16> %l)
2161 %ext = zext <4 x i16> %l to <4 x i32>
2162 ret <4 x i32> %ext
2163 }
2165 define arm_aapcs_vfpcc <4 x i32> @multi_user_sext(<4 x i16> *%dest, <4 x i32> %a) {
2166 ; CHECK-LE-LABEL: multi_user_sext:
2167 ; CHECK-LE: @ %bb.0: @ %entry
2168 ; CHECK-LE-NEXT: .save {r7, lr}
2169 ; CHECK-LE-NEXT: push {r7, lr}
2170 ; CHECK-LE-NEXT: .vsave {d8, d9}
2171 ; CHECK-LE-NEXT: vpush {d8, d9}
2172 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
2173 ; CHECK-LE-NEXT: vldrht.u32 q4, [r0]
2174 ; CHECK-LE-NEXT: vmov r0, r1, d8
2175 ; CHECK-LE-NEXT: vmov r2, r3, d9
2176 ; CHECK-LE-NEXT: bl foo
2177 ; CHECK-LE-NEXT: vmovlb.s16 q0, q4
2178 ; CHECK-LE-NEXT: vpop {d8, d9}
2179 ; CHECK-LE-NEXT: pop {r7, pc}
2181 ; CHECK-BE-LABEL: multi_user_sext:
2182 ; CHECK-BE: @ %bb.0: @ %entry
2183 ; CHECK-BE-NEXT: .save {r7, lr}
2184 ; CHECK-BE-NEXT: push {r7, lr}
2185 ; CHECK-BE-NEXT: .vsave {d8, d9}
2186 ; CHECK-BE-NEXT: vpush {d8, d9}
2187 ; CHECK-BE-NEXT: vrev64.32 q1, q0
2188 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
2189 ; CHECK-BE-NEXT: vldrht.u32 q4, [r0]
2190 ; CHECK-BE-NEXT: vrev64.32 q0, q4
2191 ; CHECK-BE-NEXT: vmov r1, r0, d0
2192 ; CHECK-BE-NEXT: vmov r3, r2, d1
2193 ; CHECK-BE-NEXT: bl foo
2194 ; CHECK-BE-NEXT: vmovlb.s16 q1, q4
2195 ; CHECK-BE-NEXT: vrev64.32 q0, q1
2196 ; CHECK-BE-NEXT: vpop {d8, d9}
2197 ; CHECK-BE-NEXT: pop {r7, pc}
2198 entry:
2199 %c = icmp sgt <4 x i32> %a, zeroinitializer
2200 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> zeroinitializer)
2201 call void @foo(<4 x i16> %l)
2202 %ext = sext <4 x i16> %l to <4 x i32>
2203 ret <4 x i32> %ext
2204 }
2206 declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32, <4 x i1>, <4 x i16>)
2207 declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
2208 declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>)
2209 declare <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>*, i32, <4 x i1>, <4 x i8>)
2210 declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32, <8 x i1>, <8 x i8>)
2211 declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)
2212 declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)
2213 declare <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>*, i32, <8 x i1>, <8 x half>)
2214 declare <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>*, i32, <2 x i1>, <2 x i64>)
2215 declare <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>)
2216 declare void @foo(<4 x i16>)