1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK-LE
3 ; RUN: llc -mtriple=thumbebv8.1m.main-none-none-eabi -mattr=+mve,+fullfp16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK-BE
5 define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align4_zero(<4 x i32> *%dest, <4 x i32> %a) {
6 ; CHECK-LE-LABEL: masked_v4i32_align4_zero:
7 ; CHECK-LE: @ %bb.0: @ %entry
8 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
9 ; CHECK-LE-NEXT: vldrwt.u32 q0, [r0]
10 ; CHECK-LE-NEXT: bx lr
12 ; CHECK-BE-LABEL: masked_v4i32_align4_zero:
13 ; CHECK-BE: @ %bb.0: @ %entry
14 ; CHECK-BE-NEXT: vrev64.32 q1, q0
15 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
16 ; CHECK-BE-NEXT: vldrwt.u32 q1, [r0]
17 ; CHECK-BE-NEXT: vrev64.32 q0, q1
18 ; CHECK-BE-NEXT: bx lr
19 entry:
20 %c = icmp sgt <4 x i32> %a, zeroinitializer
21 %l = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %dest, i32 4, <4 x i1> %c, <4 x i32> zeroinitializer)
22 ret <4 x i32> %l
23 }
25 define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align4_undef(<4 x i32> *%dest, <4 x i32> %a) {
26 ; CHECK-LE-LABEL: masked_v4i32_align4_undef:
27 ; CHECK-LE: @ %bb.0: @ %entry
28 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
29 ; CHECK-LE-NEXT: vldrwt.u32 q0, [r0]
30 ; CHECK-LE-NEXT: bx lr
32 ; CHECK-BE-LABEL: masked_v4i32_align4_undef:
33 ; CHECK-BE: @ %bb.0: @ %entry
34 ; CHECK-BE-NEXT: vrev64.32 q1, q0
35 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
36 ; CHECK-BE-NEXT: vldrwt.u32 q1, [r0]
37 ; CHECK-BE-NEXT: vrev64.32 q0, q1
38 ; CHECK-BE-NEXT: bx lr
39 entry:
40 %c = icmp sgt <4 x i32> %a, zeroinitializer
41 %l = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %dest, i32 4, <4 x i1> %c, <4 x i32> undef)
42 ret <4 x i32> %l
43 }
45 define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align1_undef(<4 x i32> *%dest, <4 x i32> %a) {
46 ; CHECK-LE-LABEL: masked_v4i32_align1_undef:
47 ; CHECK-LE: @ %bb.0: @ %entry
48 ; CHECK-LE-NEXT: .pad #4
49 ; CHECK-LE-NEXT: sub sp, #4
50 ; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr
51 ; CHECK-LE-NEXT: @ implicit-def: $q0
52 ; CHECK-LE-NEXT: vmrs r2, p0
53 ; CHECK-LE-NEXT: and r1, r2, #1
54 ; CHECK-LE-NEXT: rsbs r3, r1, #0
55 ; CHECK-LE-NEXT: movs r1, #0
56 ; CHECK-LE-NEXT: bfi r1, r3, #0, #1
57 ; CHECK-LE-NEXT: ubfx r3, r2, #4, #1
58 ; CHECK-LE-NEXT: rsbs r3, r3, #0
59 ; CHECK-LE-NEXT: bfi r1, r3, #1, #1
60 ; CHECK-LE-NEXT: ubfx r3, r2, #8, #1
61 ; CHECK-LE-NEXT: ubfx r2, r2, #12, #1
62 ; CHECK-LE-NEXT: rsbs r3, r3, #0
63 ; CHECK-LE-NEXT: bfi r1, r3, #2, #1
64 ; CHECK-LE-NEXT: rsbs r2, r2, #0
65 ; CHECK-LE-NEXT: bfi r1, r2, #3, #1
66 ; CHECK-LE-NEXT: lsls r2, r1, #31
67 ; CHECK-LE-NEXT: itt ne
68 ; CHECK-LE-NEXT: ldrne r2, [r0]
69 ; CHECK-LE-NEXT: vmovne.32 q0[0], r2
70 ; CHECK-LE-NEXT: lsls r2, r1, #30
71 ; CHECK-LE-NEXT: itt mi
72 ; CHECK-LE-NEXT: ldrmi r2, [r0, #4]
73 ; CHECK-LE-NEXT: vmovmi.32 q0[1], r2
74 ; CHECK-LE-NEXT: lsls r2, r1, #29
75 ; CHECK-LE-NEXT: itt mi
76 ; CHECK-LE-NEXT: ldrmi r2, [r0, #8]
77 ; CHECK-LE-NEXT: vmovmi.32 q0[2], r2
78 ; CHECK-LE-NEXT: lsls r1, r1, #28
79 ; CHECK-LE-NEXT: itt mi
80 ; CHECK-LE-NEXT: ldrmi r0, [r0, #12]
81 ; CHECK-LE-NEXT: vmovmi.32 q0[3], r0
82 ; CHECK-LE-NEXT: add sp, #4
83 ; CHECK-LE-NEXT: bx lr
85 ; CHECK-BE-LABEL: masked_v4i32_align1_undef:
86 ; CHECK-BE: @ %bb.0: @ %entry
87 ; CHECK-BE-NEXT: .pad #4
88 ; CHECK-BE-NEXT: sub sp, #4
89 ; CHECK-BE-NEXT: vrev64.32 q1, q0
90 ; CHECK-BE-NEXT: vcmp.s32 gt, q1, zr
91 ; CHECK-BE-NEXT: @ implicit-def: $q1
92 ; CHECK-BE-NEXT: vmrs r2, p0
93 ; CHECK-BE-NEXT: ubfx r1, r2, #12, #1
94 ; CHECK-BE-NEXT: rsbs r3, r1, #0
95 ; CHECK-BE-NEXT: movs r1, #0
96 ; CHECK-BE-NEXT: bfi r1, r3, #0, #1
97 ; CHECK-BE-NEXT: ubfx r3, r2, #8, #1
98 ; CHECK-BE-NEXT: rsbs r3, r3, #0
99 ; CHECK-BE-NEXT: bfi r1, r3, #1, #1
100 ; CHECK-BE-NEXT: ubfx r3, r2, #4, #1
101 ; CHECK-BE-NEXT: and r2, r2, #1
102 ; CHECK-BE-NEXT: rsbs r3, r3, #0
103 ; CHECK-BE-NEXT: bfi r1, r3, #2, #1
104 ; CHECK-BE-NEXT: rsbs r2, r2, #0
105 ; CHECK-BE-NEXT: bfi r1, r2, #3, #1
106 ; CHECK-BE-NEXT: lsls r2, r1, #28
107 ; CHECK-BE-NEXT: itt mi
108 ; CHECK-BE-NEXT: ldrmi r2, [r0]
109 ; CHECK-BE-NEXT: vmovmi.32 q1[0], r2
110 ; CHECK-BE-NEXT: lsls r2, r1, #29
111 ; CHECK-BE-NEXT: itt mi
112 ; CHECK-BE-NEXT: ldrmi r2, [r0, #4]
113 ; CHECK-BE-NEXT: vmovmi.32 q1[1], r2
114 ; CHECK-BE-NEXT: lsls r2, r1, #30
115 ; CHECK-BE-NEXT: itt mi
116 ; CHECK-BE-NEXT: ldrmi r2, [r0, #8]
117 ; CHECK-BE-NEXT: vmovmi.32 q1[2], r2
118 ; CHECK-BE-NEXT: lsls r1, r1, #31
119 ; CHECK-BE-NEXT: itt ne
120 ; CHECK-BE-NEXT: ldrne r0, [r0, #12]
121 ; CHECK-BE-NEXT: vmovne.32 q1[3], r0
122 ; CHECK-BE-NEXT: vrev64.32 q0, q1
123 ; CHECK-BE-NEXT: add sp, #4
124 ; CHECK-BE-NEXT: bx lr
125 entry:
126 %c = icmp sgt <4 x i32> %a, zeroinitializer
127 %l = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %dest, i32 1, <4 x i1> %c, <4 x i32> undef)
128 ret <4 x i32> %l
129 }
131 define arm_aapcs_vfpcc <4 x i32> @masked_v4i32_align4_other(<4 x i32> *%dest, <4 x i32> %a) {
132 ; CHECK-LE-LABEL: masked_v4i32_align4_other:
133 ; CHECK-LE: @ %bb.0: @ %entry
134 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
135 ; CHECK-LE-NEXT: vldrwt.u32 q1, [r0]
136 ; CHECK-LE-NEXT: vpsel q0, q1, q0
137 ; CHECK-LE-NEXT: bx lr
139 ; CHECK-BE-LABEL: masked_v4i32_align4_other:
140 ; CHECK-BE: @ %bb.0: @ %entry
141 ; CHECK-BE-NEXT: vrev64.32 q1, q0
142 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
143 ; CHECK-BE-NEXT: vldrwt.u32 q0, [r0]
144 ; CHECK-BE-NEXT: vpsel q1, q0, q1
145 ; CHECK-BE-NEXT: vrev64.32 q0, q1
146 ; CHECK-BE-NEXT: bx lr
147 entry:
148 %c = icmp sgt <4 x i32> %a, zeroinitializer
149 %l = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %dest, i32 4, <4 x i1> %c, <4 x i32> %a)
150 ret <4 x i32> %l
151 }
153 define arm_aapcs_vfpcc <4 x i32> @zext16_masked_v4i32_align2_zero(<4 x i16> *%dest, <4 x i32> %a) {
154 ; CHECK-LE-LABEL: zext16_masked_v4i32_align2_zero:
155 ; CHECK-LE: @ %bb.0: @ %entry
156 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
157 ; CHECK-LE-NEXT: vldrht.u32 q0, [r0]
158 ; CHECK-LE-NEXT: bx lr
160 ; CHECK-BE-LABEL: zext16_masked_v4i32_align2_zero:
161 ; CHECK-BE: @ %bb.0: @ %entry
162 ; CHECK-BE-NEXT: vrev64.32 q1, q0
163 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
164 ; CHECK-BE-NEXT: vldrht.u32 q1, [r0]
165 ; CHECK-BE-NEXT: vrev64.32 q0, q1
166 ; CHECK-BE-NEXT: bx lr
167 entry:
168 %c = icmp sgt <4 x i32> %a, zeroinitializer
169 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> zeroinitializer)
170 %ext = zext <4 x i16> %l to <4 x i32>
171 ret <4 x i32> %ext
172 }
174 define arm_aapcs_vfpcc <4 x i32> @zext16_masked_v4i32_align2_undef(<4 x i16> *%dest, <4 x i32> %a) {
175 ; CHECK-LE-LABEL: zext16_masked_v4i32_align2_undef:
176 ; CHECK-LE: @ %bb.0: @ %entry
177 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
178 ; CHECK-LE-NEXT: vldrht.u32 q0, [r0]
179 ; CHECK-LE-NEXT: bx lr
181 ; CHECK-BE-LABEL: zext16_masked_v4i32_align2_undef:
182 ; CHECK-BE: @ %bb.0: @ %entry
183 ; CHECK-BE-NEXT: vrev64.32 q1, q0
184 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
185 ; CHECK-BE-NEXT: vldrht.u32 q1, [r0]
186 ; CHECK-BE-NEXT: vrev64.32 q0, q1
187 ; CHECK-BE-NEXT: bx lr
188 entry:
189 %c = icmp sgt <4 x i32> %a, zeroinitializer
190 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> undef)
191 %ext = zext <4 x i16> %l to <4 x i32>
192 ret <4 x i32> %ext
193 }
195 define arm_aapcs_vfpcc <4 x i32> @zext16_masked_v4i32_align1_undef(<4 x i16> *%dest, <4 x i32> %a) {
196 ; CHECK-LE-LABEL: zext16_masked_v4i32_align1_undef:
197 ; CHECK-LE: @ %bb.0: @ %entry
198 ; CHECK-LE-NEXT: .pad #4
199 ; CHECK-LE-NEXT: sub sp, #4
200 ; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr
201 ; CHECK-LE-NEXT: @ implicit-def: $q0
202 ; CHECK-LE-NEXT: vmrs r2, p0
203 ; CHECK-LE-NEXT: and r1, r2, #1
204 ; CHECK-LE-NEXT: rsbs r3, r1, #0
205 ; CHECK-LE-NEXT: movs r1, #0
206 ; CHECK-LE-NEXT: bfi r1, r3, #0, #1
207 ; CHECK-LE-NEXT: ubfx r3, r2, #4, #1
208 ; CHECK-LE-NEXT: rsbs r3, r3, #0
209 ; CHECK-LE-NEXT: bfi r1, r3, #1, #1
210 ; CHECK-LE-NEXT: ubfx r3, r2, #8, #1
211 ; CHECK-LE-NEXT: ubfx r2, r2, #12, #1
212 ; CHECK-LE-NEXT: rsbs r3, r3, #0
213 ; CHECK-LE-NEXT: bfi r1, r3, #2, #1
214 ; CHECK-LE-NEXT: rsbs r2, r2, #0
215 ; CHECK-LE-NEXT: bfi r1, r2, #3, #1
216 ; CHECK-LE-NEXT: lsls r2, r1, #31
217 ; CHECK-LE-NEXT: itt ne
218 ; CHECK-LE-NEXT: ldrhne r2, [r0]
219 ; CHECK-LE-NEXT: vmovne.32 q0[0], r2
220 ; CHECK-LE-NEXT: lsls r2, r1, #30
221 ; CHECK-LE-NEXT: itt mi
222 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #2]
223 ; CHECK-LE-NEXT: vmovmi.32 q0[1], r2
224 ; CHECK-LE-NEXT: lsls r2, r1, #29
225 ; CHECK-LE-NEXT: itt mi
226 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #4]
227 ; CHECK-LE-NEXT: vmovmi.32 q0[2], r2
228 ; CHECK-LE-NEXT: lsls r1, r1, #28
229 ; CHECK-LE-NEXT: itt mi
230 ; CHECK-LE-NEXT: ldrhmi r0, [r0, #6]
231 ; CHECK-LE-NEXT: vmovmi.32 q0[3], r0
232 ; CHECK-LE-NEXT: vmovlb.s16 q0, q0
233 ; CHECK-LE-NEXT: add sp, #4
234 ; CHECK-LE-NEXT: bx lr
236 ; CHECK-BE-LABEL: zext16_masked_v4i32_align1_undef:
237 ; CHECK-BE: @ %bb.0: @ %entry
238 ; CHECK-BE-NEXT: .pad #4
239 ; CHECK-BE-NEXT: sub sp, #4
240 ; CHECK-BE-NEXT: vrev64.32 q1, q0
241 ; CHECK-BE-NEXT: @ implicit-def: $q0
242 ; CHECK-BE-NEXT: vcmp.s32 gt, q1, zr
243 ; CHECK-BE-NEXT: vmrs r2, p0
244 ; CHECK-BE-NEXT: ubfx r1, r2, #12, #1
245 ; CHECK-BE-NEXT: rsbs r3, r1, #0
246 ; CHECK-BE-NEXT: movs r1, #0
247 ; CHECK-BE-NEXT: bfi r1, r3, #0, #1
248 ; CHECK-BE-NEXT: ubfx r3, r2, #8, #1
249 ; CHECK-BE-NEXT: rsbs r3, r3, #0
250 ; CHECK-BE-NEXT: bfi r1, r3, #1, #1
251 ; CHECK-BE-NEXT: ubfx r3, r2, #4, #1
252 ; CHECK-BE-NEXT: and r2, r2, #1
253 ; CHECK-BE-NEXT: rsbs r3, r3, #0
254 ; CHECK-BE-NEXT: bfi r1, r3, #2, #1
255 ; CHECK-BE-NEXT: rsbs r2, r2, #0
256 ; CHECK-BE-NEXT: bfi r1, r2, #3, #1
257 ; CHECK-BE-NEXT: lsls r2, r1, #28
258 ; CHECK-BE-NEXT: itt mi
259 ; CHECK-BE-NEXT: ldrhmi r2, [r0]
260 ; CHECK-BE-NEXT: vmovmi.32 q0[0], r2
261 ; CHECK-BE-NEXT: lsls r2, r1, #29
262 ; CHECK-BE-NEXT: itt mi
263 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #2]
264 ; CHECK-BE-NEXT: vmovmi.32 q0[1], r2
265 ; CHECK-BE-NEXT: lsls r2, r1, #30
266 ; CHECK-BE-NEXT: itt mi
267 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #4]
268 ; CHECK-BE-NEXT: vmovmi.32 q0[2], r2
269 ; CHECK-BE-NEXT: lsls r1, r1, #31
270 ; CHECK-BE-NEXT: itt ne
271 ; CHECK-BE-NEXT: ldrhne r0, [r0, #6]
272 ; CHECK-BE-NEXT: vmovne.32 q0[3], r0
273 ; CHECK-BE-NEXT: vmovlb.s16 q1, q0
274 ; CHECK-BE-NEXT: vrev64.32 q0, q1
275 ; CHECK-BE-NEXT: add sp, #4
276 ; CHECK-BE-NEXT: bx lr
277 entry:
278 %c = icmp sgt <4 x i32> %a, zeroinitializer
279 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 1, <4 x i1> %c, <4 x i16> undef)
280 %ext = sext <4 x i16> %l to <4 x i32>
281 ret <4 x i32> %ext
282 }
284 define arm_aapcs_vfpcc <4 x i32> @zext16_masked_v4i32_align2_other(<4 x i16> *%dest, <4 x i16> %a) {
285 ; CHECK-LE-LABEL: zext16_masked_v4i32_align2_other:
286 ; CHECK-LE: @ %bb.0: @ %entry
287 ; CHECK-LE-NEXT: vmovlb.u16 q1, q0
288 ; CHECK-LE-NEXT: vmovlb.s16 q0, q0
289 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
290 ; CHECK-LE-NEXT: vldrht.u32 q0, [r0]
291 ; CHECK-LE-NEXT: vpsel q0, q0, q1
292 ; CHECK-LE-NEXT: bx lr
294 ; CHECK-BE-LABEL: zext16_masked_v4i32_align2_other:
295 ; CHECK-BE: @ %bb.0: @ %entry
296 ; CHECK-BE-NEXT: vrev64.32 q1, q0
297 ; CHECK-BE-NEXT: vmovlb.u16 q0, q1
298 ; CHECK-BE-NEXT: vmovlb.s16 q1, q1
299 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
300 ; CHECK-BE-NEXT: vldrht.u32 q1, [r0]
301 ; CHECK-BE-NEXT: vpsel q1, q1, q0
302 ; CHECK-BE-NEXT: vrev64.32 q0, q1
303 ; CHECK-BE-NEXT: bx lr
304 entry:
305 %c = icmp sgt <4 x i16> %a, zeroinitializer
306 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> %a)
307 %ext = zext <4 x i16> %l to <4 x i32>
308 ret <4 x i32> %ext
309 }
311 define arm_aapcs_vfpcc <4 x i32> @sext16_masked_v4i32_align2_zero(<4 x i16> *%dest, <4 x i32> %a) {
312 ; CHECK-LE-LABEL: sext16_masked_v4i32_align2_zero:
313 ; CHECK-LE: @ %bb.0: @ %entry
314 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
315 ; CHECK-LE-NEXT: vldrht.s32 q0, [r0]
316 ; CHECK-LE-NEXT: bx lr
318 ; CHECK-BE-LABEL: sext16_masked_v4i32_align2_zero:
319 ; CHECK-BE: @ %bb.0: @ %entry
320 ; CHECK-BE-NEXT: vrev64.32 q1, q0
321 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
322 ; CHECK-BE-NEXT: vldrht.s32 q1, [r0]
323 ; CHECK-BE-NEXT: vrev64.32 q0, q1
324 ; CHECK-BE-NEXT: bx lr
325 entry:
326 %c = icmp sgt <4 x i32> %a, zeroinitializer
327 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> zeroinitializer)
328 %sext = sext <4 x i16> %l to <4 x i32>
329 ret <4 x i32> %sext
330 }
332 define arm_aapcs_vfpcc <4 x i32> @sext16_masked_v4i32_align2_undef(<4 x i16> *%dest, <4 x i32> %a) {
333 ; CHECK-LE-LABEL: sext16_masked_v4i32_align2_undef:
334 ; CHECK-LE: @ %bb.0: @ %entry
335 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
336 ; CHECK-LE-NEXT: vldrht.s32 q0, [r0]
337 ; CHECK-LE-NEXT: bx lr
339 ; CHECK-BE-LABEL: sext16_masked_v4i32_align2_undef:
340 ; CHECK-BE: @ %bb.0: @ %entry
341 ; CHECK-BE-NEXT: vrev64.32 q1, q0
342 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
343 ; CHECK-BE-NEXT: vldrht.s32 q1, [r0]
344 ; CHECK-BE-NEXT: vrev64.32 q0, q1
345 ; CHECK-BE-NEXT: bx lr
346 entry:
347 %c = icmp sgt <4 x i32> %a, zeroinitializer
348 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> undef)
349 %sext = sext <4 x i16> %l to <4 x i32>
350 ret <4 x i32> %sext
351 }
353 define arm_aapcs_vfpcc <4 x i32> @sext16_masked_v4i32_align1_undef(<4 x i16> *%dest, <4 x i32> %a) {
354 ; CHECK-LE-LABEL: sext16_masked_v4i32_align1_undef:
355 ; CHECK-LE: @ %bb.0: @ %entry
356 ; CHECK-LE-NEXT: .pad #4
357 ; CHECK-LE-NEXT: sub sp, #4
358 ; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr
359 ; CHECK-LE-NEXT: @ implicit-def: $q0
360 ; CHECK-LE-NEXT: vmrs r2, p0
361 ; CHECK-LE-NEXT: and r1, r2, #1
362 ; CHECK-LE-NEXT: rsbs r3, r1, #0
363 ; CHECK-LE-NEXT: movs r1, #0
364 ; CHECK-LE-NEXT: bfi r1, r3, #0, #1
365 ; CHECK-LE-NEXT: ubfx r3, r2, #4, #1
366 ; CHECK-LE-NEXT: rsbs r3, r3, #0
367 ; CHECK-LE-NEXT: bfi r1, r3, #1, #1
368 ; CHECK-LE-NEXT: ubfx r3, r2, #8, #1
369 ; CHECK-LE-NEXT: ubfx r2, r2, #12, #1
370 ; CHECK-LE-NEXT: rsbs r3, r3, #0
371 ; CHECK-LE-NEXT: bfi r1, r3, #2, #1
372 ; CHECK-LE-NEXT: rsbs r2, r2, #0
373 ; CHECK-LE-NEXT: bfi r1, r2, #3, #1
374 ; CHECK-LE-NEXT: lsls r2, r1, #31
375 ; CHECK-LE-NEXT: itt ne
376 ; CHECK-LE-NEXT: ldrhne r2, [r0]
377 ; CHECK-LE-NEXT: vmovne.32 q0[0], r2
378 ; CHECK-LE-NEXT: lsls r2, r1, #30
379 ; CHECK-LE-NEXT: itt mi
380 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #2]
381 ; CHECK-LE-NEXT: vmovmi.32 q0[1], r2
382 ; CHECK-LE-NEXT: lsls r2, r1, #29
383 ; CHECK-LE-NEXT: itt mi
384 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #4]
385 ; CHECK-LE-NEXT: vmovmi.32 q0[2], r2
386 ; CHECK-LE-NEXT: lsls r1, r1, #28
387 ; CHECK-LE-NEXT: itt mi
388 ; CHECK-LE-NEXT: ldrhmi r0, [r0, #6]
389 ; CHECK-LE-NEXT: vmovmi.32 q0[3], r0
390 ; CHECK-LE-NEXT: vmovlb.s16 q0, q0
391 ; CHECK-LE-NEXT: add sp, #4
392 ; CHECK-LE-NEXT: bx lr
394 ; CHECK-BE-LABEL: sext16_masked_v4i32_align1_undef:
395 ; CHECK-BE: @ %bb.0: @ %entry
396 ; CHECK-BE-NEXT: .pad #4
397 ; CHECK-BE-NEXT: sub sp, #4
398 ; CHECK-BE-NEXT: vrev64.32 q1, q0
399 ; CHECK-BE-NEXT: @ implicit-def: $q0
400 ; CHECK-BE-NEXT: vcmp.s32 gt, q1, zr
401 ; CHECK-BE-NEXT: vmrs r2, p0
402 ; CHECK-BE-NEXT: ubfx r1, r2, #12, #1
403 ; CHECK-BE-NEXT: rsbs r3, r1, #0
404 ; CHECK-BE-NEXT: movs r1, #0
405 ; CHECK-BE-NEXT: bfi r1, r3, #0, #1
406 ; CHECK-BE-NEXT: ubfx r3, r2, #8, #1
407 ; CHECK-BE-NEXT: rsbs r3, r3, #0
408 ; CHECK-BE-NEXT: bfi r1, r3, #1, #1
409 ; CHECK-BE-NEXT: ubfx r3, r2, #4, #1
410 ; CHECK-BE-NEXT: and r2, r2, #1
411 ; CHECK-BE-NEXT: rsbs r3, r3, #0
412 ; CHECK-BE-NEXT: bfi r1, r3, #2, #1
413 ; CHECK-BE-NEXT: rsbs r2, r2, #0
414 ; CHECK-BE-NEXT: bfi r1, r2, #3, #1
415 ; CHECK-BE-NEXT: lsls r2, r1, #28
416 ; CHECK-BE-NEXT: itt mi
417 ; CHECK-BE-NEXT: ldrhmi r2, [r0]
418 ; CHECK-BE-NEXT: vmovmi.32 q0[0], r2
419 ; CHECK-BE-NEXT: lsls r2, r1, #29
420 ; CHECK-BE-NEXT: itt mi
421 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #2]
422 ; CHECK-BE-NEXT: vmovmi.32 q0[1], r2
423 ; CHECK-BE-NEXT: lsls r2, r1, #30
424 ; CHECK-BE-NEXT: itt mi
425 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #4]
426 ; CHECK-BE-NEXT: vmovmi.32 q0[2], r2
427 ; CHECK-BE-NEXT: lsls r1, r1, #31
428 ; CHECK-BE-NEXT: itt ne
429 ; CHECK-BE-NEXT: ldrhne r0, [r0, #6]
430 ; CHECK-BE-NEXT: vmovne.32 q0[3], r0
431 ; CHECK-BE-NEXT: vmovlb.s16 q1, q0
432 ; CHECK-BE-NEXT: vrev64.32 q0, q1
433 ; CHECK-BE-NEXT: add sp, #4
434 ; CHECK-BE-NEXT: bx lr
435 entry:
436 %c = icmp sgt <4 x i32> %a, zeroinitializer
437 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 1, <4 x i1> %c, <4 x i16> undef)
438 %sext = sext <4 x i16> %l to <4 x i32>
439 ret <4 x i32> %sext
440 }
442 define arm_aapcs_vfpcc <4 x i32> @sext16_masked_v4i32_align2_other(<4 x i16> *%dest, <4 x i16> %a) {
443 ; CHECK-LE-LABEL: sext16_masked_v4i32_align2_other:
444 ; CHECK-LE: @ %bb.0: @ %entry
445 ; CHECK-LE-NEXT: vmovlb.s16 q0, q0
446 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
447 ; CHECK-LE-NEXT: vldrht.s32 q1, [r0]
448 ; CHECK-LE-NEXT: vpsel q0, q1, q0
449 ; CHECK-LE-NEXT: bx lr
451 ; CHECK-BE-LABEL: sext16_masked_v4i32_align2_other:
452 ; CHECK-BE: @ %bb.0: @ %entry
453 ; CHECK-BE-NEXT: vrev64.32 q1, q0
454 ; CHECK-BE-NEXT: vmovlb.s16 q0, q1
455 ; CHECK-BE-NEXT: vpt.s32 gt, q0, zr
456 ; CHECK-BE-NEXT: vldrht.s32 q1, [r0]
457 ; CHECK-BE-NEXT: vpsel q1, q1, q0
458 ; CHECK-BE-NEXT: vrev64.32 q0, q1
459 ; CHECK-BE-NEXT: bx lr
460 entry:
461 %c = icmp sgt <4 x i16> %a, zeroinitializer
462 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> %a)
463 %sext = sext <4 x i16> %l to <4 x i32>
464 ret <4 x i32> %sext
465 }
467 define arm_aapcs_vfpcc i8* @masked_v4i32_preinc(i8* %x, i8* %y, <4 x i32> %a) {
468 ; CHECK-LE-LABEL: masked_v4i32_preinc:
469 ; CHECK-LE: @ %bb.0: @ %entry
470 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
471 ; CHECK-LE-NEXT: vldrwt.u32 q0, [r0, #4]!
472 ; CHECK-LE-NEXT: vstrw.32 q0, [r1]
473 ; CHECK-LE-NEXT: bx lr
475 ; CHECK-BE-LABEL: masked_v4i32_preinc:
476 ; CHECK-BE: @ %bb.0: @ %entry
477 ; CHECK-BE-NEXT: vrev64.32 q1, q0
478 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
479 ; CHECK-BE-NEXT: vldrwt.u32 q0, [r0, #4]!
480 ; CHECK-BE-NEXT: vstrw.32 q0, [r1]
481 ; CHECK-BE-NEXT: bx lr
482 entry:
483 %z = getelementptr inbounds i8, i8* %x, i32 4
484 %0 = bitcast i8* %z to <4 x i32>*
485 %c = icmp sgt <4 x i32> %a, zeroinitializer
486 %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
487 %2 = bitcast i8* %y to <4 x i32>*
488 store <4 x i32> %1, <4 x i32>* %2, align 4
489 ret i8* %z
490 }
492 define arm_aapcs_vfpcc i8* @masked_v4i32_postinc(i8* %x, i8* %y, <4 x i32> %a) {
493 ; CHECK-LE-LABEL: masked_v4i32_postinc:
494 ; CHECK-LE: @ %bb.0: @ %entry
495 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
496 ; CHECK-LE-NEXT: vldrwt.u32 q0, [r0], #4
497 ; CHECK-LE-NEXT: vstrw.32 q0, [r1]
498 ; CHECK-LE-NEXT: bx lr
500 ; CHECK-BE-LABEL: masked_v4i32_postinc:
501 ; CHECK-BE: @ %bb.0: @ %entry
502 ; CHECK-BE-NEXT: vrev64.32 q1, q0
503 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
504 ; CHECK-BE-NEXT: vldrwt.u32 q0, [r0], #4
505 ; CHECK-BE-NEXT: vstrw.32 q0, [r1]
506 ; CHECK-BE-NEXT: bx lr
507 entry:
508 %z = getelementptr inbounds i8, i8* %x, i32 4
509 %0 = bitcast i8* %x to <4 x i32>*
510 %c = icmp sgt <4 x i32> %a, zeroinitializer
511 %1 = call <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>* %0, i32 4, <4 x i1> %c, <4 x i32> undef)
512 %2 = bitcast i8* %y to <4 x i32>*
513 store <4 x i32> %1, <4 x i32>* %2, align 4
514 ret i8* %z
515 }
517 define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align4_zero(<8 x i16> *%dest, <8 x i16> %a) {
518 ; CHECK-LE-LABEL: masked_v8i16_align4_zero:
519 ; CHECK-LE: @ %bb.0: @ %entry
520 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
521 ; CHECK-LE-NEXT: vldrht.u16 q0, [r0]
522 ; CHECK-LE-NEXT: bx lr
524 ; CHECK-BE-LABEL: masked_v8i16_align4_zero:
525 ; CHECK-BE: @ %bb.0: @ %entry
526 ; CHECK-BE-NEXT: vrev64.16 q1, q0
527 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
528 ; CHECK-BE-NEXT: vldrht.u16 q1, [r0]
529 ; CHECK-BE-NEXT: vrev64.16 q0, q1
530 ; CHECK-BE-NEXT: bx lr
531 entry:
532 %c = icmp sgt <8 x i16> %a, zeroinitializer
533 %l = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %dest, i32 2, <8 x i1> %c, <8 x i16> zeroinitializer)
534 ret <8 x i16> %l
535 }
537 define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align2_undef(<8 x i16> *%dest, <8 x i16> %a) {
538 ; CHECK-LE-LABEL: masked_v8i16_align2_undef:
539 ; CHECK-LE: @ %bb.0: @ %entry
540 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
541 ; CHECK-LE-NEXT: vldrht.u16 q0, [r0]
542 ; CHECK-LE-NEXT: bx lr
544 ; CHECK-BE-LABEL: masked_v8i16_align2_undef:
545 ; CHECK-BE: @ %bb.0: @ %entry
546 ; CHECK-BE-NEXT: vrev64.16 q1, q0
547 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
548 ; CHECK-BE-NEXT: vldrht.u16 q1, [r0]
549 ; CHECK-BE-NEXT: vrev64.16 q0, q1
550 ; CHECK-BE-NEXT: bx lr
551 entry:
552 %c = icmp sgt <8 x i16> %a, zeroinitializer
553 %l = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %dest, i32 2, <8 x i1> %c, <8 x i16> undef)
554 ret <8 x i16> %l
555 }
557 define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align1_undef(<8 x i16> *%dest, <8 x i16> %a) {
558 ; CHECK-LE-LABEL: masked_v8i16_align1_undef:
559 ; CHECK-LE: @ %bb.0: @ %entry
560 ; CHECK-LE-NEXT: .pad #4
561 ; CHECK-LE-NEXT: sub sp, #4
562 ; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr
563 ; CHECK-LE-NEXT: @ implicit-def: $q0
564 ; CHECK-LE-NEXT: vmrs r1, p0
565 ; CHECK-LE-NEXT: and r2, r1, #1
566 ; CHECK-LE-NEXT: rsbs r3, r2, #0
567 ; CHECK-LE-NEXT: movs r2, #0
568 ; CHECK-LE-NEXT: bfi r2, r3, #0, #1
569 ; CHECK-LE-NEXT: ubfx r3, r1, #2, #1
570 ; CHECK-LE-NEXT: rsbs r3, r3, #0
571 ; CHECK-LE-NEXT: bfi r2, r3, #1, #1
572 ; CHECK-LE-NEXT: ubfx r3, r1, #4, #1
573 ; CHECK-LE-NEXT: rsbs r3, r3, #0
574 ; CHECK-LE-NEXT: bfi r2, r3, #2, #1
575 ; CHECK-LE-NEXT: ubfx r3, r1, #6, #1
576 ; CHECK-LE-NEXT: rsbs r3, r3, #0
577 ; CHECK-LE-NEXT: bfi r2, r3, #3, #1
578 ; CHECK-LE-NEXT: ubfx r3, r1, #8, #1
579 ; CHECK-LE-NEXT: rsbs r3, r3, #0
580 ; CHECK-LE-NEXT: bfi r2, r3, #4, #1
581 ; CHECK-LE-NEXT: ubfx r3, r1, #10, #1
582 ; CHECK-LE-NEXT: rsbs r3, r3, #0
583 ; CHECK-LE-NEXT: bfi r2, r3, #5, #1
584 ; CHECK-LE-NEXT: ubfx r3, r1, #12, #1
585 ; CHECK-LE-NEXT: ubfx r1, r1, #14, #1
586 ; CHECK-LE-NEXT: rsbs r3, r3, #0
587 ; CHECK-LE-NEXT: bfi r2, r3, #6, #1
588 ; CHECK-LE-NEXT: rsbs r1, r1, #0
589 ; CHECK-LE-NEXT: bfi r2, r1, #7, #1
590 ; CHECK-LE-NEXT: uxtb r1, r2
591 ; CHECK-LE-NEXT: lsls r2, r2, #31
592 ; CHECK-LE-NEXT: itt ne
593 ; CHECK-LE-NEXT: ldrhne r2, [r0]
594 ; CHECK-LE-NEXT: vmovne.16 q0[0], r2
595 ; CHECK-LE-NEXT: lsls r2, r1, #30
596 ; CHECK-LE-NEXT: itt mi
597 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #2]
598 ; CHECK-LE-NEXT: vmovmi.16 q0[1], r2
599 ; CHECK-LE-NEXT: lsls r2, r1, #29
600 ; CHECK-LE-NEXT: itt mi
601 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #4]
602 ; CHECK-LE-NEXT: vmovmi.16 q0[2], r2
603 ; CHECK-LE-NEXT: lsls r2, r1, #28
604 ; CHECK-LE-NEXT: itt mi
605 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #6]
606 ; CHECK-LE-NEXT: vmovmi.16 q0[3], r2
607 ; CHECK-LE-NEXT: lsls r2, r1, #27
608 ; CHECK-LE-NEXT: itt mi
609 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #8]
610 ; CHECK-LE-NEXT: vmovmi.16 q0[4], r2
611 ; CHECK-LE-NEXT: lsls r2, r1, #26
612 ; CHECK-LE-NEXT: itt mi
613 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #10]
614 ; CHECK-LE-NEXT: vmovmi.16 q0[5], r2
615 ; CHECK-LE-NEXT: lsls r2, r1, #25
616 ; CHECK-LE-NEXT: itt mi
617 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #12]
618 ; CHECK-LE-NEXT: vmovmi.16 q0[6], r2
619 ; CHECK-LE-NEXT: lsls r1, r1, #24
620 ; CHECK-LE-NEXT: itt mi
621 ; CHECK-LE-NEXT: ldrhmi r0, [r0, #14]
622 ; CHECK-LE-NEXT: vmovmi.16 q0[7], r0
623 ; CHECK-LE-NEXT: add sp, #4
624 ; CHECK-LE-NEXT: bx lr
626 ; CHECK-BE-LABEL: masked_v8i16_align1_undef:
627 ; CHECK-BE: @ %bb.0: @ %entry
628 ; CHECK-BE-NEXT: .pad #4
629 ; CHECK-BE-NEXT: sub sp, #4
630 ; CHECK-BE-NEXT: vrev64.16 q1, q0
631 ; CHECK-BE-NEXT: vcmp.s16 gt, q1, zr
632 ; CHECK-BE-NEXT: @ implicit-def: $q1
633 ; CHECK-BE-NEXT: vmrs r1, p0
634 ; CHECK-BE-NEXT: ubfx r2, r1, #14, #1
635 ; CHECK-BE-NEXT: rsbs r3, r2, #0
636 ; CHECK-BE-NEXT: movs r2, #0
637 ; CHECK-BE-NEXT: bfi r2, r3, #0, #1
638 ; CHECK-BE-NEXT: ubfx r3, r1, #12, #1
639 ; CHECK-BE-NEXT: rsbs r3, r3, #0
640 ; CHECK-BE-NEXT: bfi r2, r3, #1, #1
641 ; CHECK-BE-NEXT: ubfx r3, r1, #10, #1
642 ; CHECK-BE-NEXT: rsbs r3, r3, #0
643 ; CHECK-BE-NEXT: bfi r2, r3, #2, #1
644 ; CHECK-BE-NEXT: ubfx r3, r1, #8, #1
645 ; CHECK-BE-NEXT: rsbs r3, r3, #0
646 ; CHECK-BE-NEXT: bfi r2, r3, #3, #1
647 ; CHECK-BE-NEXT: ubfx r3, r1, #6, #1
648 ; CHECK-BE-NEXT: rsbs r3, r3, #0
649 ; CHECK-BE-NEXT: bfi r2, r3, #4, #1
650 ; CHECK-BE-NEXT: ubfx r3, r1, #4, #1
651 ; CHECK-BE-NEXT: rsbs r3, r3, #0
652 ; CHECK-BE-NEXT: bfi r2, r3, #5, #1
653 ; CHECK-BE-NEXT: ubfx r3, r1, #2, #1
654 ; CHECK-BE-NEXT: and r1, r1, #1
655 ; CHECK-BE-NEXT: rsbs r3, r3, #0
656 ; CHECK-BE-NEXT: bfi r2, r3, #6, #1
657 ; CHECK-BE-NEXT: rsbs r1, r1, #0
658 ; CHECK-BE-NEXT: bfi r2, r1, #7, #1
659 ; CHECK-BE-NEXT: uxtb r1, r2
660 ; CHECK-BE-NEXT: lsls r2, r2, #24
661 ; CHECK-BE-NEXT: itt mi
662 ; CHECK-BE-NEXT: ldrhmi r2, [r0]
663 ; CHECK-BE-NEXT: vmovmi.16 q1[0], r2
664 ; CHECK-BE-NEXT: lsls r2, r1, #25
665 ; CHECK-BE-NEXT: itt mi
666 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #2]
667 ; CHECK-BE-NEXT: vmovmi.16 q1[1], r2
668 ; CHECK-BE-NEXT: lsls r2, r1, #26
669 ; CHECK-BE-NEXT: itt mi
670 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #4]
671 ; CHECK-BE-NEXT: vmovmi.16 q1[2], r2
672 ; CHECK-BE-NEXT: lsls r2, r1, #27
673 ; CHECK-BE-NEXT: itt mi
674 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #6]
675 ; CHECK-BE-NEXT: vmovmi.16 q1[3], r2
676 ; CHECK-BE-NEXT: lsls r2, r1, #28
677 ; CHECK-BE-NEXT: itt mi
678 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #8]
679 ; CHECK-BE-NEXT: vmovmi.16 q1[4], r2
680 ; CHECK-BE-NEXT: lsls r2, r1, #29
681 ; CHECK-BE-NEXT: itt mi
682 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #10]
683 ; CHECK-BE-NEXT: vmovmi.16 q1[5], r2
684 ; CHECK-BE-NEXT: lsls r2, r1, #30
685 ; CHECK-BE-NEXT: itt mi
686 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #12]
687 ; CHECK-BE-NEXT: vmovmi.16 q1[6], r2
688 ; CHECK-BE-NEXT: lsls r1, r1, #31
689 ; CHECK-BE-NEXT: itt ne
690 ; CHECK-BE-NEXT: ldrhne r0, [r0, #14]
691 ; CHECK-BE-NEXT: vmovne.16 q1[7], r0
692 ; CHECK-BE-NEXT: vrev64.16 q0, q1
693 ; CHECK-BE-NEXT: add sp, #4
694 ; CHECK-BE-NEXT: bx lr
695 entry:
696 %c = icmp sgt <8 x i16> %a, zeroinitializer
697 %l = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %dest, i32 1, <8 x i1> %c, <8 x i16> undef)
698 ret <8 x i16> %l
699 }
701 define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align4_other(<8 x i16> *%dest, <8 x i16> %a) {
702 ; CHECK-LE-LABEL: masked_v8i16_align4_other:
703 ; CHECK-LE: @ %bb.0: @ %entry
704 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
705 ; CHECK-LE-NEXT: vldrht.u16 q1, [r0]
706 ; CHECK-LE-NEXT: vpsel q0, q1, q0
707 ; CHECK-LE-NEXT: bx lr
709 ; CHECK-BE-LABEL: masked_v8i16_align4_other:
710 ; CHECK-BE: @ %bb.0: @ %entry
711 ; CHECK-BE-NEXT: vrev64.16 q1, q0
712 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
713 ; CHECK-BE-NEXT: vldrht.u16 q0, [r0]
714 ; CHECK-BE-NEXT: vpsel q1, q0, q1
715 ; CHECK-BE-NEXT: vrev64.16 q0, q1
716 ; CHECK-BE-NEXT: bx lr
717 entry:
718 %c = icmp sgt <8 x i16> %a, zeroinitializer
719 %l = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %dest, i32 2, <8 x i1> %c, <8 x i16> %a)
720 ret <8 x i16> %l
721 }
723 define arm_aapcs_vfpcc <8 x i16> @sext8_masked_v8i16_align1_zero(<8 x i8> *%dest, <8 x i8> %a) {
724 ; CHECK-LE-LABEL: sext8_masked_v8i16_align1_zero:
725 ; CHECK-LE: @ %bb.0: @ %entry
726 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
727 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
728 ; CHECK-LE-NEXT: vldrbt.s16 q0, [r0]
729 ; CHECK-LE-NEXT: bx lr
731 ; CHECK-BE-LABEL: sext8_masked_v8i16_align1_zero:
732 ; CHECK-BE: @ %bb.0: @ %entry
733 ; CHECK-BE-NEXT: vrev64.16 q1, q0
734 ; CHECK-BE-NEXT: vmovlb.s8 q0, q1
735 ; CHECK-BE-NEXT: vpt.s16 gt, q0, zr
736 ; CHECK-BE-NEXT: vldrbt.s16 q1, [r0]
737 ; CHECK-BE-NEXT: vrev64.16 q0, q1
738 ; CHECK-BE-NEXT: bx lr
739 entry:
740 %c = icmp sgt <8 x i8> %a, zeroinitializer
741 %l = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %dest, i32 1, <8 x i1> %c, <8 x i8> zeroinitializer)
742 %ext = sext <8 x i8> %l to <8 x i16>
743 ret <8 x i16> %ext
744 }
746 define arm_aapcs_vfpcc <8 x i16> @sext8_masked_v8i16_align1_undef(<8 x i8> *%dest, <8 x i8> %a) {
747 ; CHECK-LE-LABEL: sext8_masked_v8i16_align1_undef:
748 ; CHECK-LE: @ %bb.0: @ %entry
749 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
750 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
751 ; CHECK-LE-NEXT: vldrbt.s16 q0, [r0]
752 ; CHECK-LE-NEXT: bx lr
754 ; CHECK-BE-LABEL: sext8_masked_v8i16_align1_undef:
755 ; CHECK-BE: @ %bb.0: @ %entry
756 ; CHECK-BE-NEXT: vrev64.16 q1, q0
757 ; CHECK-BE-NEXT: vmovlb.s8 q0, q1
758 ; CHECK-BE-NEXT: vpt.s16 gt, q0, zr
759 ; CHECK-BE-NEXT: vldrbt.s16 q1, [r0]
760 ; CHECK-BE-NEXT: vrev64.16 q0, q1
761 ; CHECK-BE-NEXT: bx lr
762 entry:
763 %c = icmp sgt <8 x i8> %a, zeroinitializer
764 %l = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %dest, i32 1, <8 x i1> %c, <8 x i8> undef)
765 %ext = sext <8 x i8> %l to <8 x i16>
766 ret <8 x i16> %ext
767 }
769 define arm_aapcs_vfpcc <8 x i16> @sext8_masked_v8i16_align1_other(<8 x i8> *%dest, <8 x i8> %a) {
770 ; CHECK-LE-LABEL: sext8_masked_v8i16_align1_other:
771 ; CHECK-LE: @ %bb.0: @ %entry
772 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
773 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
774 ; CHECK-LE-NEXT: vldrbt.s16 q1, [r0]
775 ; CHECK-LE-NEXT: vpsel q0, q1, q0
776 ; CHECK-LE-NEXT: bx lr
778 ; CHECK-BE-LABEL: sext8_masked_v8i16_align1_other:
779 ; CHECK-BE: @ %bb.0: @ %entry
780 ; CHECK-BE-NEXT: vrev64.16 q1, q0
781 ; CHECK-BE-NEXT: vmovlb.s8 q0, q1
782 ; CHECK-BE-NEXT: vpt.s16 gt, q0, zr
783 ; CHECK-BE-NEXT: vldrbt.s16 q1, [r0]
784 ; CHECK-BE-NEXT: vpsel q1, q1, q0
785 ; CHECK-BE-NEXT: vrev64.16 q0, q1
786 ; CHECK-BE-NEXT: bx lr
787 entry:
788 %c = icmp sgt <8 x i8> %a, zeroinitializer
789 %l = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %dest, i32 1, <8 x i1> %c, <8 x i8> %a)
790 %ext = sext <8 x i8> %l to <8 x i16>
791 ret <8 x i16> %ext
792 }
794 define arm_aapcs_vfpcc <4 x i32> @sext8_masked_v4i32_align1_zero(<4 x i8> *%dest, <4 x i8> %a) {
795 ; CHECK-LE-LABEL: sext8_masked_v4i32_align1_zero:
796 ; CHECK-LE: @ %bb.0: @ %entry
797 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
798 ; CHECK-LE-NEXT: vmovlb.s16 q0, q0
799 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
800 ; CHECK-LE-NEXT: vldrbt.s32 q0, [r0]
801 ; CHECK-LE-NEXT: bx lr
803 ; CHECK-BE-LABEL: sext8_masked_v4i32_align1_zero:
804 ; CHECK-BE: @ %bb.0: @ %entry
805 ; CHECK-BE-NEXT: vrev64.32 q1, q0
806 ; CHECK-BE-NEXT: vmovlb.s8 q0, q1
807 ; CHECK-BE-NEXT: vmovlb.s16 q0, q0
808 ; CHECK-BE-NEXT: vpt.s32 gt, q0, zr
809 ; CHECK-BE-NEXT: vldrbt.s32 q1, [r0]
810 ; CHECK-BE-NEXT: vrev64.32 q0, q1
811 ; CHECK-BE-NEXT: bx lr
812 entry:
813 %c = icmp sgt <4 x i8> %a, zeroinitializer
814 %l = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %dest, i32 1, <4 x i1> %c, <4 x i8> zeroinitializer)
815 %ext = sext <4 x i8> %l to <4 x i32>
816 ret <4 x i32> %ext
817 }
819 define arm_aapcs_vfpcc <4 x i32> @sext8_masked_v4i32_align1_undef(<4 x i8> *%dest, <4 x i8> %a) {
820 ; CHECK-LE-LABEL: sext8_masked_v4i32_align1_undef:
821 ; CHECK-LE: @ %bb.0: @ %entry
822 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
823 ; CHECK-LE-NEXT: vmovlb.s16 q0, q0
824 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
825 ; CHECK-LE-NEXT: vldrbt.s32 q0, [r0]
826 ; CHECK-LE-NEXT: bx lr
828 ; CHECK-BE-LABEL: sext8_masked_v4i32_align1_undef:
829 ; CHECK-BE: @ %bb.0: @ %entry
830 ; CHECK-BE-NEXT: vrev64.32 q1, q0
831 ; CHECK-BE-NEXT: vmovlb.s8 q0, q1
832 ; CHECK-BE-NEXT: vmovlb.s16 q0, q0
833 ; CHECK-BE-NEXT: vpt.s32 gt, q0, zr
834 ; CHECK-BE-NEXT: vldrbt.s32 q1, [r0]
835 ; CHECK-BE-NEXT: vrev64.32 q0, q1
836 ; CHECK-BE-NEXT: bx lr
837 entry:
838 %c = icmp sgt <4 x i8> %a, zeroinitializer
839 %l = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %dest, i32 1, <4 x i1> %c, <4 x i8> undef)
840 %ext = sext <4 x i8> %l to <4 x i32>
841 ret <4 x i32> %ext
842 }
844 define arm_aapcs_vfpcc <4 x i32> @sext8_masked_v4i32_align1_other(<4 x i8> *%dest, <4 x i8> %a) {
845 ; CHECK-LE-LABEL: sext8_masked_v4i32_align1_other:
846 ; CHECK-LE: @ %bb.0: @ %entry
847 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
848 ; CHECK-LE-NEXT: vmovlb.s16 q0, q0
849 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
850 ; CHECK-LE-NEXT: vldrbt.s32 q1, [r0]
851 ; CHECK-LE-NEXT: vpsel q0, q1, q0
852 ; CHECK-LE-NEXT: bx lr
854 ; CHECK-BE-LABEL: sext8_masked_v4i32_align1_other:
855 ; CHECK-BE: @ %bb.0: @ %entry
856 ; CHECK-BE-NEXT: vrev64.32 q1, q0
857 ; CHECK-BE-NEXT: vmovlb.s8 q0, q1
858 ; CHECK-BE-NEXT: vmovlb.s16 q0, q0
859 ; CHECK-BE-NEXT: vpt.s32 gt, q0, zr
860 ; CHECK-BE-NEXT: vldrbt.s32 q1, [r0]
861 ; CHECK-BE-NEXT: vpsel q1, q1, q0
862 ; CHECK-BE-NEXT: vrev64.32 q0, q1
863 ; CHECK-BE-NEXT: bx lr
864 entry:
865 %c = icmp sgt <4 x i8> %a, zeroinitializer
866 %l = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %dest, i32 1, <4 x i1> %c, <4 x i8> %a)
867 %ext = sext <4 x i8> %l to <4 x i32>
868 ret <4 x i32> %ext
869 }
871 define arm_aapcs_vfpcc <4 x i32> @zext8_masked_v4i32_align1_zero(<4 x i8> *%dest, <4 x i8> %a) {
872 ; CHECK-LE-LABEL: zext8_masked_v4i32_align1_zero:
873 ; CHECK-LE: @ %bb.0: @ %entry
874 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
875 ; CHECK-LE-NEXT: vmovlb.s16 q0, q0
876 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
877 ; CHECK-LE-NEXT: vldrbt.u32 q0, [r0]
878 ; CHECK-LE-NEXT: bx lr
880 ; CHECK-BE-LABEL: zext8_masked_v4i32_align1_zero:
881 ; CHECK-BE: @ %bb.0: @ %entry
882 ; CHECK-BE-NEXT: vrev64.32 q1, q0
883 ; CHECK-BE-NEXT: vmovlb.s8 q0, q1
884 ; CHECK-BE-NEXT: vmovlb.s16 q0, q0
885 ; CHECK-BE-NEXT: vpt.s32 gt, q0, zr
886 ; CHECK-BE-NEXT: vldrbt.u32 q1, [r0]
887 ; CHECK-BE-NEXT: vrev64.32 q0, q1
888 ; CHECK-BE-NEXT: bx lr
889 entry:
890 %c = icmp sgt <4 x i8> %a, zeroinitializer
891 %l = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %dest, i32 1, <4 x i1> %c, <4 x i8> zeroinitializer)
892 %ext = zext <4 x i8> %l to <4 x i32>
893 ret <4 x i32> %ext
894 }
896 define arm_aapcs_vfpcc <4 x i32> @zext8_masked_v4i32_align1_undef(<4 x i8> *%dest, <4 x i8> %a) {
897 ; CHECK-LE-LABEL: zext8_masked_v4i32_align1_undef:
898 ; CHECK-LE: @ %bb.0: @ %entry
899 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
900 ; CHECK-LE-NEXT: vmovlb.s16 q0, q0
901 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
902 ; CHECK-LE-NEXT: vldrbt.u32 q0, [r0]
903 ; CHECK-LE-NEXT: bx lr
905 ; CHECK-BE-LABEL: zext8_masked_v4i32_align1_undef:
906 ; CHECK-BE: @ %bb.0: @ %entry
907 ; CHECK-BE-NEXT: vrev64.32 q1, q0
908 ; CHECK-BE-NEXT: vmovlb.s8 q0, q1
909 ; CHECK-BE-NEXT: vmovlb.s16 q0, q0
910 ; CHECK-BE-NEXT: vpt.s32 gt, q0, zr
911 ; CHECK-BE-NEXT: vldrbt.u32 q1, [r0]
912 ; CHECK-BE-NEXT: vrev64.32 q0, q1
913 ; CHECK-BE-NEXT: bx lr
914 entry:
915 %c = icmp sgt <4 x i8> %a, zeroinitializer
916 %l = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %dest, i32 1, <4 x i1> %c, <4 x i8> undef)
917 %ext = zext <4 x i8> %l to <4 x i32>
918 ret <4 x i32> %ext
919 }
921 define arm_aapcs_vfpcc <4 x i32> @zext8_masked_v4i32_align1_other(<4 x i8> *%dest, <4 x i8> %a) {
922 ; CHECK-LE-LABEL: zext8_masked_v4i32_align1_other:
923 ; CHECK-LE: @ %bb.0: @ %entry
924 ; CHECK-LE-NEXT: vmov.i32 q1, #0xff
925 ; CHECK-LE-NEXT: vand q1, q0, q1
926 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
927 ; CHECK-LE-NEXT: vmovlb.s16 q0, q0
928 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
929 ; CHECK-LE-NEXT: vldrbt.u32 q0, [r0]
930 ; CHECK-LE-NEXT: vpsel q0, q0, q1
931 ; CHECK-LE-NEXT: bx lr
933 ; CHECK-BE-LABEL: zext8_masked_v4i32_align1_other:
934 ; CHECK-BE: @ %bb.0: @ %entry
935 ; CHECK-BE-NEXT: vmov.i32 q1, #0xff
936 ; CHECK-BE-NEXT: vrev64.32 q2, q0
937 ; CHECK-BE-NEXT: vand q0, q2, q1
938 ; CHECK-BE-NEXT: vmovlb.s8 q1, q2
939 ; CHECK-BE-NEXT: vmovlb.s16 q1, q1
940 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
941 ; CHECK-BE-NEXT: vldrbt.u32 q1, [r0]
942 ; CHECK-BE-NEXT: vpsel q1, q1, q0
943 ; CHECK-BE-NEXT: vrev64.32 q0, q1
944 ; CHECK-BE-NEXT: bx lr
945 entry:
946 %c = icmp sgt <4 x i8> %a, zeroinitializer
947 %l = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %dest, i32 1, <4 x i1> %c, <4 x i8> %a)
948 %ext = zext <4 x i8> %l to <4 x i32>
949 ret <4 x i32> %ext
950 }
952 define arm_aapcs_vfpcc <8 x i16> @zext8_masked_v8i16_align1_zero(<8 x i8> *%dest, <8 x i8> %a) {
953 ; CHECK-LE-LABEL: zext8_masked_v8i16_align1_zero:
954 ; CHECK-LE: @ %bb.0: @ %entry
955 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
956 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
957 ; CHECK-LE-NEXT: vldrbt.u16 q0, [r0]
958 ; CHECK-LE-NEXT: bx lr
960 ; CHECK-BE-LABEL: zext8_masked_v8i16_align1_zero:
961 ; CHECK-BE: @ %bb.0: @ %entry
962 ; CHECK-BE-NEXT: vrev64.16 q1, q0
963 ; CHECK-BE-NEXT: vmovlb.s8 q0, q1
964 ; CHECK-BE-NEXT: vpt.s16 gt, q0, zr
965 ; CHECK-BE-NEXT: vldrbt.u16 q1, [r0]
966 ; CHECK-BE-NEXT: vrev64.16 q0, q1
967 ; CHECK-BE-NEXT: bx lr
968 entry:
969 %c = icmp sgt <8 x i8> %a, zeroinitializer
970 %l = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %dest, i32 1, <8 x i1> %c, <8 x i8> zeroinitializer)
971 %ext = zext <8 x i8> %l to <8 x i16>
972 ret <8 x i16> %ext
973 }
975 define arm_aapcs_vfpcc <8 x i16> @zext8_masked_v8i16_align1_undef(<8 x i8> *%dest, <8 x i8> %a) {
976 ; CHECK-LE-LABEL: zext8_masked_v8i16_align1_undef:
977 ; CHECK-LE: @ %bb.0: @ %entry
978 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
979 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
980 ; CHECK-LE-NEXT: vldrbt.u16 q0, [r0]
981 ; CHECK-LE-NEXT: bx lr
983 ; CHECK-BE-LABEL: zext8_masked_v8i16_align1_undef:
984 ; CHECK-BE: @ %bb.0: @ %entry
985 ; CHECK-BE-NEXT: vrev64.16 q1, q0
986 ; CHECK-BE-NEXT: vmovlb.s8 q0, q1
987 ; CHECK-BE-NEXT: vpt.s16 gt, q0, zr
988 ; CHECK-BE-NEXT: vldrbt.u16 q1, [r0]
989 ; CHECK-BE-NEXT: vrev64.16 q0, q1
990 ; CHECK-BE-NEXT: bx lr
991 entry:
992 %c = icmp sgt <8 x i8> %a, zeroinitializer
993 %l = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %dest, i32 1, <8 x i1> %c, <8 x i8> undef)
994 %ext = zext <8 x i8> %l to <8 x i16>
995 ret <8 x i16> %ext
996 }
998 define arm_aapcs_vfpcc <8 x i16> @zext8_masked_v8i16_align1_other(<8 x i8> *%dest, <8 x i8> %a) {
999 ; CHECK-LE-LABEL: zext8_masked_v8i16_align1_other:
1000 ; CHECK-LE: @ %bb.0: @ %entry
1001 ; CHECK-LE-NEXT: vmovlb.u8 q1, q0
1002 ; CHECK-LE-NEXT: vmovlb.s8 q0, q0
1003 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
1004 ; CHECK-LE-NEXT: vldrbt.u16 q0, [r0]
1005 ; CHECK-LE-NEXT: vpsel q0, q0, q1
1006 ; CHECK-LE-NEXT: bx lr
1008 ; CHECK-BE-LABEL: zext8_masked_v8i16_align1_other:
1009 ; CHECK-BE: @ %bb.0: @ %entry
1010 ; CHECK-BE-NEXT: vrev64.16 q1, q0
1011 ; CHECK-BE-NEXT: vmovlb.u8 q0, q1
1012 ; CHECK-BE-NEXT: vmovlb.s8 q1, q1
1013 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
1014 ; CHECK-BE-NEXT: vldrbt.u16 q1, [r0]
1015 ; CHECK-BE-NEXT: vpsel q1, q1, q0
1016 ; CHECK-BE-NEXT: vrev64.16 q0, q1
1017 ; CHECK-BE-NEXT: bx lr
1018 entry:
1019 %c = icmp sgt <8 x i8> %a, zeroinitializer
1020 %l = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %dest, i32 1, <8 x i1> %c, <8 x i8> %a)
1021 %ext = zext <8 x i8> %l to <8 x i16>
1022 ret <8 x i16> %ext
1023 }
1025 define i8* @masked_v8i16_preinc(i8* %x, i8* %y, <8 x i16> %a) {
1026 ; CHECK-LE-LABEL: masked_v8i16_preinc:
1027 ; CHECK-LE: @ %bb.0: @ %entry
1028 ; CHECK-LE-NEXT: vldr d1, [sp]
1029 ; CHECK-LE-NEXT: vmov d0, r2, r3
1030 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
1031 ; CHECK-LE-NEXT: vldrht.u16 q0, [r0, #4]!
1032 ; CHECK-LE-NEXT: vstrw.32 q0, [r1]
1033 ; CHECK-LE-NEXT: bx lr
1035 ; CHECK-BE-LABEL: masked_v8i16_preinc:
1036 ; CHECK-BE: @ %bb.0: @ %entry
1037 ; CHECK-BE-NEXT: vldr d1, [sp]
1038 ; CHECK-BE-NEXT: vmov d0, r3, r2
1039 ; CHECK-BE-NEXT: vrev64.16 q1, q0
1040 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
1041 ; CHECK-BE-NEXT: vldrht.u16 q0, [r0, #4]!
1042 ; CHECK-BE-NEXT: vstrh.16 q0, [r1]
1043 ; CHECK-BE-NEXT: bx lr
1044 entry:
1045 %z = getelementptr inbounds i8, i8* %x, i32 4
1046 %0 = bitcast i8* %z to <8 x i16>*
1047 %c = icmp sgt <8 x i16> %a, zeroinitializer
1048 %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 4, <8 x i1> %c, <8 x i16> undef)
1049 %2 = bitcast i8* %y to <8 x i16>*
1050 store <8 x i16> %1, <8 x i16>* %2, align 4
1051 ret i8* %z
1052 }
1054 define arm_aapcs_vfpcc i8* @masked_v8i16_postinc(i8* %x, i8* %y, <8 x i16> %a) {
1055 ; CHECK-LE-LABEL: masked_v8i16_postinc:
1056 ; CHECK-LE: @ %bb.0: @ %entry
1057 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
1058 ; CHECK-LE-NEXT: vldrht.u16 q0, [r0], #4
1059 ; CHECK-LE-NEXT: vstrw.32 q0, [r1]
1060 ; CHECK-LE-NEXT: bx lr
1062 ; CHECK-BE-LABEL: masked_v8i16_postinc:
1063 ; CHECK-BE: @ %bb.0: @ %entry
1064 ; CHECK-BE-NEXT: vrev64.16 q1, q0
1065 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
1066 ; CHECK-BE-NEXT: vldrht.u16 q0, [r0], #4
1067 ; CHECK-BE-NEXT: vstrh.16 q0, [r1]
1068 ; CHECK-BE-NEXT: bx lr
1069 entry:
1070 %z = getelementptr inbounds i8, i8* %x, i32 4
1071 %0 = bitcast i8* %x to <8 x i16>*
1072 %c = icmp sgt <8 x i16> %a, zeroinitializer
1073 %1 = call <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>* %0, i32 4, <8 x i1> %c, <8 x i16> undef)
1074 %2 = bitcast i8* %y to <8 x i16>*
1075 store <8 x i16> %1, <8 x i16>* %2, align 4
1076 ret i8* %z
1077 }
1080 define arm_aapcs_vfpcc <16 x i8> @masked_v16i8_align4_zero(<16 x i8> *%dest, <16 x i8> %a) {
1081 ; CHECK-LE-LABEL: masked_v16i8_align4_zero:
1082 ; CHECK-LE: @ %bb.0: @ %entry
1083 ; CHECK-LE-NEXT: vpt.s8 gt, q0, zr
1084 ; CHECK-LE-NEXT: vldrbt.u8 q0, [r0]
1085 ; CHECK-LE-NEXT: bx lr
1087 ; CHECK-BE-LABEL: masked_v16i8_align4_zero:
1088 ; CHECK-BE: @ %bb.0: @ %entry
1089 ; CHECK-BE-NEXT: vrev64.8 q1, q0
1090 ; CHECK-BE-NEXT: vpt.s8 gt, q1, zr
1091 ; CHECK-BE-NEXT: vldrbt.u8 q1, [r0]
1092 ; CHECK-BE-NEXT: vrev64.8 q0, q1
1093 ; CHECK-BE-NEXT: bx lr
1094 entry:
1095 %c = icmp sgt <16 x i8> %a, zeroinitializer
1096 %l = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %dest, i32 1, <16 x i1> %c, <16 x i8> zeroinitializer)
1097 ret <16 x i8> %l
1098 }
1100 define arm_aapcs_vfpcc <16 x i8> @masked_v16i8_align4_undef(<16 x i8> *%dest, <16 x i8> %a) {
1101 ; CHECK-LE-LABEL: masked_v16i8_align4_undef:
1102 ; CHECK-LE: @ %bb.0: @ %entry
1103 ; CHECK-LE-NEXT: vpt.s8 gt, q0, zr
1104 ; CHECK-LE-NEXT: vldrbt.u8 q0, [r0]
1105 ; CHECK-LE-NEXT: bx lr
1107 ; CHECK-BE-LABEL: masked_v16i8_align4_undef:
1108 ; CHECK-BE: @ %bb.0: @ %entry
1109 ; CHECK-BE-NEXT: vrev64.8 q1, q0
1110 ; CHECK-BE-NEXT: vpt.s8 gt, q1, zr
1111 ; CHECK-BE-NEXT: vldrbt.u8 q1, [r0]
1112 ; CHECK-BE-NEXT: vrev64.8 q0, q1
1113 ; CHECK-BE-NEXT: bx lr
1114 entry:
1115 %c = icmp sgt <16 x i8> %a, zeroinitializer
1116 %l = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %dest, i32 1, <16 x i1> %c, <16 x i8> undef)
1117 ret <16 x i8> %l
1118 }
1120 define arm_aapcs_vfpcc <16 x i8> @masked_v16i8_align4_other(<16 x i8> *%dest, <16 x i8> %a) {
1121 ; CHECK-LE-LABEL: masked_v16i8_align4_other:
1122 ; CHECK-LE: @ %bb.0: @ %entry
1123 ; CHECK-LE-NEXT: vpt.s8 gt, q0, zr
1124 ; CHECK-LE-NEXT: vldrbt.u8 q1, [r0]
1125 ; CHECK-LE-NEXT: vpsel q0, q1, q0
1126 ; CHECK-LE-NEXT: bx lr
1128 ; CHECK-BE-LABEL: masked_v16i8_align4_other:
1129 ; CHECK-BE: @ %bb.0: @ %entry
1130 ; CHECK-BE-NEXT: vrev64.8 q1, q0
1131 ; CHECK-BE-NEXT: vpt.s8 gt, q1, zr
1132 ; CHECK-BE-NEXT: vldrbt.u8 q0, [r0]
1133 ; CHECK-BE-NEXT: vpsel q1, q0, q1
1134 ; CHECK-BE-NEXT: vrev64.8 q0, q1
1135 ; CHECK-BE-NEXT: bx lr
1136 entry:
1137 %c = icmp sgt <16 x i8> %a, zeroinitializer
1138 %l = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %dest, i32 1, <16 x i1> %c, <16 x i8> %a)
1139 ret <16 x i8> %l
1140 }
1142 define arm_aapcs_vfpcc i8* @masked_v16i8_preinc(i8* %x, i8* %y, <16 x i8> %a) {
1143 ; CHECK-LE-LABEL: masked_v16i8_preinc:
1144 ; CHECK-LE: @ %bb.0: @ %entry
1145 ; CHECK-LE-NEXT: vpt.s8 gt, q0, zr
1146 ; CHECK-LE-NEXT: vldrbt.u8 q0, [r0, #4]!
1147 ; CHECK-LE-NEXT: vstrw.32 q0, [r1]
1148 ; CHECK-LE-NEXT: bx lr
1150 ; CHECK-BE-LABEL: masked_v16i8_preinc:
1151 ; CHECK-BE: @ %bb.0: @ %entry
1152 ; CHECK-BE-NEXT: vrev64.8 q1, q0
1153 ; CHECK-BE-NEXT: vpt.s8 gt, q1, zr
1154 ; CHECK-BE-NEXT: vldrbt.u8 q0, [r0, #4]!
1155 ; CHECK-BE-NEXT: vstrb.8 q0, [r1]
1156 ; CHECK-BE-NEXT: bx lr
1157 entry:
1158 %z = getelementptr inbounds i8, i8* %x, i32 4
1159 %0 = bitcast i8* %z to <16 x i8>*
1160 %c = icmp sgt <16 x i8> %a, zeroinitializer
1161 %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 4, <16 x i1> %c, <16 x i8> undef)
1162 %2 = bitcast i8* %y to <16 x i8>*
1163 store <16 x i8> %1, <16 x i8>* %2, align 4
1164 ret i8* %z
1165 }
1167 define arm_aapcs_vfpcc i8* @masked_v16i8_postinc(i8* %x, i8* %y, <16 x i8> %a) {
1168 ; CHECK-LE-LABEL: masked_v16i8_postinc:
1169 ; CHECK-LE: @ %bb.0: @ %entry
1170 ; CHECK-LE-NEXT: vpt.s8 gt, q0, zr
1171 ; CHECK-LE-NEXT: vldrbt.u8 q0, [r0], #4
1172 ; CHECK-LE-NEXT: vstrw.32 q0, [r1]
1173 ; CHECK-LE-NEXT: bx lr
1175 ; CHECK-BE-LABEL: masked_v16i8_postinc:
1176 ; CHECK-BE: @ %bb.0: @ %entry
1177 ; CHECK-BE-NEXT: vrev64.8 q1, q0
1178 ; CHECK-BE-NEXT: vpt.s8 gt, q1, zr
1179 ; CHECK-BE-NEXT: vldrbt.u8 q0, [r0], #4
1180 ; CHECK-BE-NEXT: vstrb.8 q0, [r1]
1181 ; CHECK-BE-NEXT: bx lr
1182 entry:
1183 %z = getelementptr inbounds i8, i8* %x, i32 4
1184 %0 = bitcast i8* %x to <16 x i8>*
1185 %c = icmp sgt <16 x i8> %a, zeroinitializer
1186 %1 = call <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>* %0, i32 4, <16 x i1> %c, <16 x i8> undef)
1187 %2 = bitcast i8* %y to <16 x i8>*
1188 store <16 x i8> %1, <16 x i8>* %2, align 4
1189 ret i8* %z
1190 }
1193 define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align4_zero(<4 x float> *%dest, <4 x i32> %a) {
1194 ; CHECK-LE-LABEL: masked_v4f32_align4_zero:
1195 ; CHECK-LE: @ %bb.0: @ %entry
1196 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
1197 ; CHECK-LE-NEXT: vldrwt.u32 q0, [r0]
1198 ; CHECK-LE-NEXT: bx lr
1200 ; CHECK-BE-LABEL: masked_v4f32_align4_zero:
1201 ; CHECK-BE: @ %bb.0: @ %entry
1202 ; CHECK-BE-NEXT: vrev64.32 q1, q0
1203 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
1204 ; CHECK-BE-NEXT: vldrwt.u32 q1, [r0]
1205 ; CHECK-BE-NEXT: vrev64.32 q0, q1
1206 ; CHECK-BE-NEXT: bx lr
1207 entry:
1208 %c = icmp sgt <4 x i32> %a, zeroinitializer
1209 %l = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %dest, i32 4, <4 x i1> %c, <4 x float> zeroinitializer)
1210 ret <4 x float> %l
1211 }
1213 define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align4_undef(<4 x float> *%dest, <4 x i32> %a) {
1214 ; CHECK-LE-LABEL: masked_v4f32_align4_undef:
1215 ; CHECK-LE: @ %bb.0: @ %entry
1216 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
1217 ; CHECK-LE-NEXT: vldrwt.u32 q0, [r0]
1218 ; CHECK-LE-NEXT: bx lr
1220 ; CHECK-BE-LABEL: masked_v4f32_align4_undef:
1221 ; CHECK-BE: @ %bb.0: @ %entry
1222 ; CHECK-BE-NEXT: vrev64.32 q1, q0
1223 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
1224 ; CHECK-BE-NEXT: vldrwt.u32 q1, [r0]
1225 ; CHECK-BE-NEXT: vrev64.32 q0, q1
1226 ; CHECK-BE-NEXT: bx lr
1227 entry:
1228 %c = icmp sgt <4 x i32> %a, zeroinitializer
1229 %l = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %dest, i32 4, <4 x i1> %c, <4 x float> undef)
1230 ret <4 x float> %l
1231 }
1233 define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align1_undef(<4 x float> *%dest, <4 x i32> %a) {
1234 ; CHECK-LE-LABEL: masked_v4f32_align1_undef:
1235 ; CHECK-LE: @ %bb.0: @ %entry
1236 ; CHECK-LE-NEXT: .pad #4
1237 ; CHECK-LE-NEXT: sub sp, #4
1238 ; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr
1239 ; CHECK-LE-NEXT: @ implicit-def: $q0
1240 ; CHECK-LE-NEXT: vmrs r2, p0
1241 ; CHECK-LE-NEXT: and r1, r2, #1
1242 ; CHECK-LE-NEXT: rsbs r3, r1, #0
1243 ; CHECK-LE-NEXT: movs r1, #0
1244 ; CHECK-LE-NEXT: bfi r1, r3, #0, #1
1245 ; CHECK-LE-NEXT: ubfx r3, r2, #4, #1
1246 ; CHECK-LE-NEXT: rsbs r3, r3, #0
1247 ; CHECK-LE-NEXT: bfi r1, r3, #1, #1
1248 ; CHECK-LE-NEXT: ubfx r3, r2, #8, #1
1249 ; CHECK-LE-NEXT: ubfx r2, r2, #12, #1
1250 ; CHECK-LE-NEXT: rsbs r3, r3, #0
1251 ; CHECK-LE-NEXT: bfi r1, r3, #2, #1
1252 ; CHECK-LE-NEXT: rsbs r2, r2, #0
1253 ; CHECK-LE-NEXT: bfi r1, r2, #3, #1
1254 ; CHECK-LE-NEXT: lsls r2, r1, #31
1255 ; CHECK-LE-NEXT: itt ne
1256 ; CHECK-LE-NEXT: ldrne r2, [r0]
1257 ; CHECK-LE-NEXT: vmovne s0, r2
1258 ; CHECK-LE-NEXT: lsls r2, r1, #30
1259 ; CHECK-LE-NEXT: itt mi
1260 ; CHECK-LE-NEXT: ldrmi r2, [r0, #4]
1261 ; CHECK-LE-NEXT: vmovmi s1, r2
1262 ; CHECK-LE-NEXT: lsls r2, r1, #29
1263 ; CHECK-LE-NEXT: itt mi
1264 ; CHECK-LE-NEXT: ldrmi r2, [r0, #8]
1265 ; CHECK-LE-NEXT: vmovmi s2, r2
1266 ; CHECK-LE-NEXT: lsls r1, r1, #28
1267 ; CHECK-LE-NEXT: itt mi
1268 ; CHECK-LE-NEXT: ldrmi r0, [r0, #12]
1269 ; CHECK-LE-NEXT: vmovmi s3, r0
1270 ; CHECK-LE-NEXT: add sp, #4
1271 ; CHECK-LE-NEXT: bx lr
1273 ; CHECK-BE-LABEL: masked_v4f32_align1_undef:
1274 ; CHECK-BE: @ %bb.0: @ %entry
1275 ; CHECK-BE-NEXT: .pad #4
1276 ; CHECK-BE-NEXT: sub sp, #4
1277 ; CHECK-BE-NEXT: vrev64.32 q1, q0
1278 ; CHECK-BE-NEXT: vcmp.s32 gt, q1, zr
1279 ; CHECK-BE-NEXT: @ implicit-def: $q1
1280 ; CHECK-BE-NEXT: vmrs r2, p0
1281 ; CHECK-BE-NEXT: ubfx r1, r2, #12, #1
1282 ; CHECK-BE-NEXT: rsbs r3, r1, #0
1283 ; CHECK-BE-NEXT: movs r1, #0
1284 ; CHECK-BE-NEXT: bfi r1, r3, #0, #1
1285 ; CHECK-BE-NEXT: ubfx r3, r2, #8, #1
1286 ; CHECK-BE-NEXT: rsbs r3, r3, #0
1287 ; CHECK-BE-NEXT: bfi r1, r3, #1, #1
1288 ; CHECK-BE-NEXT: ubfx r3, r2, #4, #1
1289 ; CHECK-BE-NEXT: and r2, r2, #1
1290 ; CHECK-BE-NEXT: rsbs r3, r3, #0
1291 ; CHECK-BE-NEXT: bfi r1, r3, #2, #1
1292 ; CHECK-BE-NEXT: rsbs r2, r2, #0
1293 ; CHECK-BE-NEXT: bfi r1, r2, #3, #1
1294 ; CHECK-BE-NEXT: lsls r2, r1, #28
1295 ; CHECK-BE-NEXT: itt mi
1296 ; CHECK-BE-NEXT: ldrmi r2, [r0]
1297 ; CHECK-BE-NEXT: vmovmi s4, r2
1298 ; CHECK-BE-NEXT: lsls r2, r1, #29
1299 ; CHECK-BE-NEXT: itt mi
1300 ; CHECK-BE-NEXT: ldrmi r2, [r0, #4]
1301 ; CHECK-BE-NEXT: vmovmi s5, r2
1302 ; CHECK-BE-NEXT: lsls r2, r1, #30
1303 ; CHECK-BE-NEXT: itt mi
1304 ; CHECK-BE-NEXT: ldrmi r2, [r0, #8]
1305 ; CHECK-BE-NEXT: vmovmi s6, r2
1306 ; CHECK-BE-NEXT: lsls r1, r1, #31
1307 ; CHECK-BE-NEXT: itt ne
1308 ; CHECK-BE-NEXT: ldrne r0, [r0, #12]
1309 ; CHECK-BE-NEXT: vmovne s7, r0
1310 ; CHECK-BE-NEXT: vrev64.32 q0, q1
1311 ; CHECK-BE-NEXT: add sp, #4
1312 ; CHECK-BE-NEXT: bx lr
1313 entry:
1314 %c = icmp sgt <4 x i32> %a, zeroinitializer
1315 %l = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %dest, i32 1, <4 x i1> %c, <4 x float> undef)
1316 ret <4 x float> %l
1317 }
1319 define arm_aapcs_vfpcc <4 x float> @masked_v4f32_align4_other(<4 x float> *%dest, <4 x i32> %a, <4 x float> %b) {
1320 ; CHECK-LE-LABEL: masked_v4f32_align4_other:
1321 ; CHECK-LE: @ %bb.0: @ %entry
1322 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
1323 ; CHECK-LE-NEXT: vldrwt.u32 q0, [r0]
1324 ; CHECK-LE-NEXT: vpsel q0, q0, q1
1325 ; CHECK-LE-NEXT: bx lr
1327 ; CHECK-BE-LABEL: masked_v4f32_align4_other:
1328 ; CHECK-BE: @ %bb.0: @ %entry
1329 ; CHECK-BE-NEXT: vrev64.32 q2, q1
1330 ; CHECK-BE-NEXT: vrev64.32 q1, q0
1331 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
1332 ; CHECK-BE-NEXT: vldrwt.u32 q0, [r0]
1333 ; CHECK-BE-NEXT: vpsel q1, q0, q2
1334 ; CHECK-BE-NEXT: vrev64.32 q0, q1
1335 ; CHECK-BE-NEXT: bx lr
1336 entry:
1337 %c = icmp sgt <4 x i32> %a, zeroinitializer
1338 %l = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %dest, i32 4, <4 x i1> %c, <4 x float> %b)
1339 ret <4 x float> %l
1340 }
1342 define arm_aapcs_vfpcc i8* @masked_v4f32_preinc(i8* %x, i8* %y, <4 x i32> %a) {
1343 ; CHECK-LE-LABEL: masked_v4f32_preinc:
1344 ; CHECK-LE: @ %bb.0: @ %entry
1345 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
1346 ; CHECK-LE-NEXT: vldrwt.u32 q0, [r0, #4]!
1347 ; CHECK-LE-NEXT: vstrw.32 q0, [r1]
1348 ; CHECK-LE-NEXT: bx lr
1350 ; CHECK-BE-LABEL: masked_v4f32_preinc:
1351 ; CHECK-BE: @ %bb.0: @ %entry
1352 ; CHECK-BE-NEXT: vrev64.32 q1, q0
1353 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
1354 ; CHECK-BE-NEXT: vldrwt.u32 q0, [r0, #4]!
1355 ; CHECK-BE-NEXT: vstrw.32 q0, [r1]
1356 ; CHECK-BE-NEXT: bx lr
1357 entry:
1358 %z = getelementptr inbounds i8, i8* %x, i32 4
1359 %0 = bitcast i8* %z to <4 x float>*
1360 %c = icmp sgt <4 x i32> %a, zeroinitializer
1361 %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
1362 %2 = bitcast i8* %y to <4 x float>*
1363 store <4 x float> %1, <4 x float>* %2, align 4
1364 ret i8* %z
1365 }
1367 define arm_aapcs_vfpcc i8* @masked_v4f32_postinc(i8* %x, i8* %y, <4 x i32> %a) {
1368 ; CHECK-LE-LABEL: masked_v4f32_postinc:
1369 ; CHECK-LE: @ %bb.0: @ %entry
1370 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
1371 ; CHECK-LE-NEXT: vldrwt.u32 q0, [r0], #4
1372 ; CHECK-LE-NEXT: vstrw.32 q0, [r1]
1373 ; CHECK-LE-NEXT: bx lr
1375 ; CHECK-BE-LABEL: masked_v4f32_postinc:
1376 ; CHECK-BE: @ %bb.0: @ %entry
1377 ; CHECK-BE-NEXT: vrev64.32 q1, q0
1378 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
1379 ; CHECK-BE-NEXT: vldrwt.u32 q0, [r0], #4
1380 ; CHECK-BE-NEXT: vstrw.32 q0, [r1]
1381 ; CHECK-BE-NEXT: bx lr
1382 entry:
1383 %z = getelementptr inbounds i8, i8* %x, i32 4
1384 %0 = bitcast i8* %x to <4 x float>*
1385 %c = icmp sgt <4 x i32> %a, zeroinitializer
1386 %1 = call <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>* %0, i32 4, <4 x i1> %c, <4 x float> undef)
1387 %2 = bitcast i8* %y to <4 x float>*
1388 store <4 x float> %1, <4 x float>* %2, align 4
1389 ret i8* %z
1390 }
1393 define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align4_zero(<8 x half> *%dest, <8 x i16> %a) {
1394 ; CHECK-LE-LABEL: masked_v8f16_align4_zero:
1395 ; CHECK-LE: @ %bb.0: @ %entry
1396 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
1397 ; CHECK-LE-NEXT: vldrht.u16 q0, [r0]
1398 ; CHECK-LE-NEXT: bx lr
1400 ; CHECK-BE-LABEL: masked_v8f16_align4_zero:
1401 ; CHECK-BE: @ %bb.0: @ %entry
1402 ; CHECK-BE-NEXT: vrev64.16 q1, q0
1403 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
1404 ; CHECK-BE-NEXT: vldrht.u16 q1, [r0]
1405 ; CHECK-BE-NEXT: vrev64.16 q0, q1
1406 ; CHECK-BE-NEXT: bx lr
1407 entry:
1408 %c = icmp sgt <8 x i16> %a, zeroinitializer
1409 %l = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %dest, i32 2, <8 x i1> %c, <8 x half> zeroinitializer)
1410 ret <8 x half> %l
1411 }
1413 define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align4_undef(<8 x half> *%dest, <8 x i16> %a) {
1414 ; CHECK-LE-LABEL: masked_v8f16_align4_undef:
1415 ; CHECK-LE: @ %bb.0: @ %entry
1416 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
1417 ; CHECK-LE-NEXT: vldrht.u16 q0, [r0]
1418 ; CHECK-LE-NEXT: bx lr
1420 ; CHECK-BE-LABEL: masked_v8f16_align4_undef:
1421 ; CHECK-BE: @ %bb.0: @ %entry
1422 ; CHECK-BE-NEXT: vrev64.16 q1, q0
1423 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
1424 ; CHECK-BE-NEXT: vldrht.u16 q1, [r0]
1425 ; CHECK-BE-NEXT: vrev64.16 q0, q1
1426 ; CHECK-BE-NEXT: bx lr
1427 entry:
1428 %c = icmp sgt <8 x i16> %a, zeroinitializer
1429 %l = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %dest, i32 2, <8 x i1> %c, <8 x half> undef)
1430 ret <8 x half> %l
1431 }
1433 define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align1_undef(<8 x half> *%dest, <8 x i16> %a) {
1434 ; CHECK-LE-LABEL: masked_v8f16_align1_undef:
1435 ; CHECK-LE: @ %bb.0: @ %entry
1436 ; CHECK-LE-NEXT: .pad #36
1437 ; CHECK-LE-NEXT: sub sp, #36
1438 ; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr
1439 ; CHECK-LE-NEXT: @ implicit-def: $q0
1440 ; CHECK-LE-NEXT: vmrs r1, p0
1441 ; CHECK-LE-NEXT: and r2, r1, #1
1442 ; CHECK-LE-NEXT: rsbs r3, r2, #0
1443 ; CHECK-LE-NEXT: movs r2, #0
1444 ; CHECK-LE-NEXT: bfi r2, r3, #0, #1
1445 ; CHECK-LE-NEXT: ubfx r3, r1, #2, #1
1446 ; CHECK-LE-NEXT: rsbs r3, r3, #0
1447 ; CHECK-LE-NEXT: bfi r2, r3, #1, #1
1448 ; CHECK-LE-NEXT: ubfx r3, r1, #4, #1
1449 ; CHECK-LE-NEXT: rsbs r3, r3, #0
1450 ; CHECK-LE-NEXT: bfi r2, r3, #2, #1
1451 ; CHECK-LE-NEXT: ubfx r3, r1, #6, #1
1452 ; CHECK-LE-NEXT: rsbs r3, r3, #0
1453 ; CHECK-LE-NEXT: bfi r2, r3, #3, #1
1454 ; CHECK-LE-NEXT: ubfx r3, r1, #8, #1
1455 ; CHECK-LE-NEXT: rsbs r3, r3, #0
1456 ; CHECK-LE-NEXT: bfi r2, r3, #4, #1
1457 ; CHECK-LE-NEXT: ubfx r3, r1, #10, #1
1458 ; CHECK-LE-NEXT: rsbs r3, r3, #0
1459 ; CHECK-LE-NEXT: bfi r2, r3, #5, #1
1460 ; CHECK-LE-NEXT: ubfx r3, r1, #12, #1
1461 ; CHECK-LE-NEXT: ubfx r1, r1, #14, #1
1462 ; CHECK-LE-NEXT: rsbs r3, r3, #0
1463 ; CHECK-LE-NEXT: bfi r2, r3, #6, #1
1464 ; CHECK-LE-NEXT: rsbs r1, r1, #0
1465 ; CHECK-LE-NEXT: bfi r2, r1, #7, #1
1466 ; CHECK-LE-NEXT: uxtb r1, r2
1467 ; CHECK-LE-NEXT: lsls r2, r2, #31
1468 ; CHECK-LE-NEXT: bne .LBB45_9
1469 ; CHECK-LE-NEXT: @ %bb.1: @ %else
1470 ; CHECK-LE-NEXT: lsls r2, r1, #30
1471 ; CHECK-LE-NEXT: bmi .LBB45_10
1472 ; CHECK-LE-NEXT: .LBB45_2: @ %else2
1473 ; CHECK-LE-NEXT: lsls r2, r1, #29
1474 ; CHECK-LE-NEXT: bmi .LBB45_11
1475 ; CHECK-LE-NEXT: .LBB45_3: @ %else5
1476 ; CHECK-LE-NEXT: lsls r2, r1, #28
1477 ; CHECK-LE-NEXT: bmi .LBB45_12
1478 ; CHECK-LE-NEXT: .LBB45_4: @ %else8
1479 ; CHECK-LE-NEXT: lsls r2, r1, #27
1480 ; CHECK-LE-NEXT: bmi .LBB45_13
1481 ; CHECK-LE-NEXT: .LBB45_5: @ %else11
1482 ; CHECK-LE-NEXT: lsls r2, r1, #26
1483 ; CHECK-LE-NEXT: bmi .LBB45_14
1484 ; CHECK-LE-NEXT: .LBB45_6: @ %else14
1485 ; CHECK-LE-NEXT: lsls r2, r1, #25
1486 ; CHECK-LE-NEXT: bmi .LBB45_15
1487 ; CHECK-LE-NEXT: .LBB45_7: @ %else17
1488 ; CHECK-LE-NEXT: lsls r1, r1, #24
1489 ; CHECK-LE-NEXT: bmi .LBB45_16
1490 ; CHECK-LE-NEXT: .LBB45_8: @ %else20
1491 ; CHECK-LE-NEXT: add sp, #36
1492 ; CHECK-LE-NEXT: bx lr
1493 ; CHECK-LE-NEXT: .LBB45_9: @ %cond.load
1494 ; CHECK-LE-NEXT: ldrh r2, [r0]
1495 ; CHECK-LE-NEXT: strh.w r2, [sp, #28]
1496 ; CHECK-LE-NEXT: vldr.16 s0, [sp, #28]
1497 ; CHECK-LE-NEXT: lsls r2, r1, #30
1498 ; CHECK-LE-NEXT: bpl .LBB45_2
1499 ; CHECK-LE-NEXT: .LBB45_10: @ %cond.load1
1500 ; CHECK-LE-NEXT: ldrh r2, [r0, #2]
1501 ; CHECK-LE-NEXT: strh.w r2, [sp, #24]
1502 ; CHECK-LE-NEXT: vldr.16 s4, [sp, #24]
1503 ; CHECK-LE-NEXT: vins.f16 s0, s4
1504 ; CHECK-LE-NEXT: lsls r2, r1, #29
1505 ; CHECK-LE-NEXT: bpl .LBB45_3
1506 ; CHECK-LE-NEXT: .LBB45_11: @ %cond.load4
1507 ; CHECK-LE-NEXT: ldrh r2, [r0, #4]
1508 ; CHECK-LE-NEXT: strh.w r2, [sp, #20]
1509 ; CHECK-LE-NEXT: vldr.16 s4, [sp, #20]
1510 ; CHECK-LE-NEXT: vmov r2, s4
1511 ; CHECK-LE-NEXT: vmov.16 q0[2], r2
1512 ; CHECK-LE-NEXT: lsls r2, r1, #28
1513 ; CHECK-LE-NEXT: bpl .LBB45_4
1514 ; CHECK-LE-NEXT: .LBB45_12: @ %cond.load7
1515 ; CHECK-LE-NEXT: ldrh r2, [r0, #6]
1516 ; CHECK-LE-NEXT: strh.w r2, [sp, #16]
1517 ; CHECK-LE-NEXT: vldr.16 s4, [sp, #16]
1518 ; CHECK-LE-NEXT: vins.f16 s1, s4
1519 ; CHECK-LE-NEXT: lsls r2, r1, #27
1520 ; CHECK-LE-NEXT: bpl .LBB45_5
1521 ; CHECK-LE-NEXT: .LBB45_13: @ %cond.load10
1522 ; CHECK-LE-NEXT: ldrh r2, [r0, #8]
1523 ; CHECK-LE-NEXT: strh.w r2, [sp, #12]
1524 ; CHECK-LE-NEXT: vldr.16 s4, [sp, #12]
1525 ; CHECK-LE-NEXT: vmov r2, s4
1526 ; CHECK-LE-NEXT: vmov.16 q0[4], r2
1527 ; CHECK-LE-NEXT: lsls r2, r1, #26
1528 ; CHECK-LE-NEXT: bpl .LBB45_6
1529 ; CHECK-LE-NEXT: .LBB45_14: @ %cond.load13
1530 ; CHECK-LE-NEXT: ldrh r2, [r0, #10]
1531 ; CHECK-LE-NEXT: strh.w r2, [sp, #8]
1532 ; CHECK-LE-NEXT: vldr.16 s4, [sp, #8]
1533 ; CHECK-LE-NEXT: vins.f16 s2, s4
1534 ; CHECK-LE-NEXT: lsls r2, r1, #25
1535 ; CHECK-LE-NEXT: bpl .LBB45_7
1536 ; CHECK-LE-NEXT: .LBB45_15: @ %cond.load16
1537 ; CHECK-LE-NEXT: ldrh r2, [r0, #12]
1538 ; CHECK-LE-NEXT: strh.w r2, [sp, #4]
1539 ; CHECK-LE-NEXT: vldr.16 s4, [sp, #4]
1540 ; CHECK-LE-NEXT: vmov r2, s4
1541 ; CHECK-LE-NEXT: vmov.16 q0[6], r2
1542 ; CHECK-LE-NEXT: lsls r1, r1, #24
1543 ; CHECK-LE-NEXT: bpl .LBB45_8
1544 ; CHECK-LE-NEXT: .LBB45_16: @ %cond.load19
1545 ; CHECK-LE-NEXT: ldrh r0, [r0, #14]
1546 ; CHECK-LE-NEXT: strh.w r0, [sp]
1547 ; CHECK-LE-NEXT: vldr.16 s4, [sp]
1548 ; CHECK-LE-NEXT: vins.f16 s3, s4
1549 ; CHECK-LE-NEXT: add sp, #36
1550 ; CHECK-LE-NEXT: bx lr
1552 ; CHECK-BE-LABEL: masked_v8f16_align1_undef:
1553 ; CHECK-BE: @ %bb.0: @ %entry
1554 ; CHECK-BE-NEXT: .pad #36
1555 ; CHECK-BE-NEXT: sub sp, #36
1556 ; CHECK-BE-NEXT: vrev64.16 q1, q0
1557 ; CHECK-BE-NEXT: vcmp.s16 gt, q1, zr
1558 ; CHECK-BE-NEXT: @ implicit-def: $q1
1559 ; CHECK-BE-NEXT: vmrs r1, p0
1560 ; CHECK-BE-NEXT: ubfx r2, r1, #14, #1
1561 ; CHECK-BE-NEXT: rsbs r3, r2, #0
1562 ; CHECK-BE-NEXT: movs r2, #0
1563 ; CHECK-BE-NEXT: bfi r2, r3, #0, #1
1564 ; CHECK-BE-NEXT: ubfx r3, r1, #12, #1
1565 ; CHECK-BE-NEXT: rsbs r3, r3, #0
1566 ; CHECK-BE-NEXT: bfi r2, r3, #1, #1
1567 ; CHECK-BE-NEXT: ubfx r3, r1, #10, #1
1568 ; CHECK-BE-NEXT: rsbs r3, r3, #0
1569 ; CHECK-BE-NEXT: bfi r2, r3, #2, #1
1570 ; CHECK-BE-NEXT: ubfx r3, r1, #8, #1
1571 ; CHECK-BE-NEXT: rsbs r3, r3, #0
1572 ; CHECK-BE-NEXT: bfi r2, r3, #3, #1
1573 ; CHECK-BE-NEXT: ubfx r3, r1, #6, #1
1574 ; CHECK-BE-NEXT: rsbs r3, r3, #0
1575 ; CHECK-BE-NEXT: bfi r2, r3, #4, #1
1576 ; CHECK-BE-NEXT: ubfx r3, r1, #4, #1
1577 ; CHECK-BE-NEXT: rsbs r3, r3, #0
1578 ; CHECK-BE-NEXT: bfi r2, r3, #5, #1
1579 ; CHECK-BE-NEXT: ubfx r3, r1, #2, #1
1580 ; CHECK-BE-NEXT: and r1, r1, #1
1581 ; CHECK-BE-NEXT: rsbs r3, r3, #0
1582 ; CHECK-BE-NEXT: bfi r2, r3, #6, #1
1583 ; CHECK-BE-NEXT: rsbs r1, r1, #0
1584 ; CHECK-BE-NEXT: bfi r2, r1, #7, #1
1585 ; CHECK-BE-NEXT: uxtb r1, r2
1586 ; CHECK-BE-NEXT: lsls r2, r2, #24
1587 ; CHECK-BE-NEXT: bmi .LBB45_10
1588 ; CHECK-BE-NEXT: @ %bb.1: @ %else
1589 ; CHECK-BE-NEXT: lsls r2, r1, #25
1590 ; CHECK-BE-NEXT: bmi .LBB45_11
1591 ; CHECK-BE-NEXT: .LBB45_2: @ %else2
1592 ; CHECK-BE-NEXT: lsls r2, r1, #26
1593 ; CHECK-BE-NEXT: bmi .LBB45_12
1594 ; CHECK-BE-NEXT: .LBB45_3: @ %else5
1595 ; CHECK-BE-NEXT: lsls r2, r1, #27
1596 ; CHECK-BE-NEXT: bmi .LBB45_13
1597 ; CHECK-BE-NEXT: .LBB45_4: @ %else8
1598 ; CHECK-BE-NEXT: lsls r2, r1, #28
1599 ; CHECK-BE-NEXT: bmi .LBB45_14
1600 ; CHECK-BE-NEXT: .LBB45_5: @ %else11
1601 ; CHECK-BE-NEXT: lsls r2, r1, #29
1602 ; CHECK-BE-NEXT: bmi .LBB45_15
1603 ; CHECK-BE-NEXT: .LBB45_6: @ %else14
1604 ; CHECK-BE-NEXT: lsls r2, r1, #30
1605 ; CHECK-BE-NEXT: bmi .LBB45_16
1606 ; CHECK-BE-NEXT: .LBB45_7: @ %else17
1607 ; CHECK-BE-NEXT: lsls r1, r1, #31
1608 ; CHECK-BE-NEXT: beq .LBB45_9
1609 ; CHECK-BE-NEXT: .LBB45_8: @ %cond.load19
1610 ; CHECK-BE-NEXT: ldrh r0, [r0, #14]
1611 ; CHECK-BE-NEXT: strh.w r0, [sp]
1612 ; CHECK-BE-NEXT: vldr.16 s0, [sp]
1613 ; CHECK-BE-NEXT: vins.f16 s7, s0
1614 ; CHECK-BE-NEXT: .LBB45_9: @ %else20
1615 ; CHECK-BE-NEXT: vrev64.16 q0, q1
1616 ; CHECK-BE-NEXT: add sp, #36
1617 ; CHECK-BE-NEXT: bx lr
1618 ; CHECK-BE-NEXT: .LBB45_10: @ %cond.load
1619 ; CHECK-BE-NEXT: ldrh r2, [r0]
1620 ; CHECK-BE-NEXT: strh.w r2, [sp, #28]
1621 ; CHECK-BE-NEXT: vldr.16 s4, [sp, #28]
1622 ; CHECK-BE-NEXT: lsls r2, r1, #25
1623 ; CHECK-BE-NEXT: bpl .LBB45_2
1624 ; CHECK-BE-NEXT: .LBB45_11: @ %cond.load1
1625 ; CHECK-BE-NEXT: ldrh r2, [r0, #2]
1626 ; CHECK-BE-NEXT: strh.w r2, [sp, #24]
1627 ; CHECK-BE-NEXT: vldr.16 s0, [sp, #24]
1628 ; CHECK-BE-NEXT: vins.f16 s4, s0
1629 ; CHECK-BE-NEXT: lsls r2, r1, #26
1630 ; CHECK-BE-NEXT: bpl .LBB45_3
1631 ; CHECK-BE-NEXT: .LBB45_12: @ %cond.load4
1632 ; CHECK-BE-NEXT: ldrh r2, [r0, #4]
1633 ; CHECK-BE-NEXT: strh.w r2, [sp, #20]
1634 ; CHECK-BE-NEXT: vldr.16 s0, [sp, #20]
1635 ; CHECK-BE-NEXT: vmov r2, s0
1636 ; CHECK-BE-NEXT: vmov.16 q1[2], r2
1637 ; CHECK-BE-NEXT: lsls r2, r1, #27
1638 ; CHECK-BE-NEXT: bpl .LBB45_4
1639 ; CHECK-BE-NEXT: .LBB45_13: @ %cond.load7
1640 ; CHECK-BE-NEXT: ldrh r2, [r0, #6]
1641 ; CHECK-BE-NEXT: strh.w r2, [sp, #16]
1642 ; CHECK-BE-NEXT: vldr.16 s0, [sp, #16]
1643 ; CHECK-BE-NEXT: vins.f16 s5, s0
1644 ; CHECK-BE-NEXT: lsls r2, r1, #28
1645 ; CHECK-BE-NEXT: bpl .LBB45_5
1646 ; CHECK-BE-NEXT: .LBB45_14: @ %cond.load10
1647 ; CHECK-BE-NEXT: ldrh r2, [r0, #8]
1648 ; CHECK-BE-NEXT: strh.w r2, [sp, #12]
1649 ; CHECK-BE-NEXT: vldr.16 s0, [sp, #12]
1650 ; CHECK-BE-NEXT: vmov r2, s0
1651 ; CHECK-BE-NEXT: vmov.16 q1[4], r2
1652 ; CHECK-BE-NEXT: lsls r2, r1, #29
1653 ; CHECK-BE-NEXT: bpl .LBB45_6
1654 ; CHECK-BE-NEXT: .LBB45_15: @ %cond.load13
1655 ; CHECK-BE-NEXT: ldrh r2, [r0, #10]
1656 ; CHECK-BE-NEXT: strh.w r2, [sp, #8]
1657 ; CHECK-BE-NEXT: vldr.16 s0, [sp, #8]
1658 ; CHECK-BE-NEXT: vins.f16 s6, s0
1659 ; CHECK-BE-NEXT: lsls r2, r1, #30
1660 ; CHECK-BE-NEXT: bpl .LBB45_7
1661 ; CHECK-BE-NEXT: .LBB45_16: @ %cond.load16
1662 ; CHECK-BE-NEXT: ldrh r2, [r0, #12]
1663 ; CHECK-BE-NEXT: strh.w r2, [sp, #4]
1664 ; CHECK-BE-NEXT: vldr.16 s0, [sp, #4]
1665 ; CHECK-BE-NEXT: vmov r2, s0
1666 ; CHECK-BE-NEXT: vmov.16 q1[6], r2
1667 ; CHECK-BE-NEXT: lsls r1, r1, #31
1668 ; CHECK-BE-NEXT: bne .LBB45_8
1669 ; CHECK-BE-NEXT: b .LBB45_9
1670 entry:
1671 %c = icmp sgt <8 x i16> %a, zeroinitializer
1672 %l = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %dest, i32 1, <8 x i1> %c, <8 x half> undef)
1673 ret <8 x half> %l
1674 }
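; Non-undef passthru: as the checks below show, the masked <8 x half> load stays a predicated vldrht and a vpsel merges %b into the inactive lanes.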
1676 define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align4_other(<8 x half> *%dest, <8 x i16> %a, <8 x half> %b) {
1677 ; CHECK-LE-LABEL: masked_v8f16_align4_other:
1678 ; CHECK-LE: @ %bb.0: @ %entry
1679 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
1680 ; CHECK-LE-NEXT: vldrht.u16 q0, [r0]
1681 ; CHECK-LE-NEXT: vpsel q0, q0, q1
1682 ; CHECK-LE-NEXT: bx lr
1684 ; CHECK-BE-LABEL: masked_v8f16_align4_other:
1685 ; CHECK-BE: @ %bb.0: @ %entry
1686 ; CHECK-BE-NEXT: vrev64.16 q2, q1
1687 ; CHECK-BE-NEXT: vrev64.16 q1, q0
1688 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
1689 ; CHECK-BE-NEXT: vldrht.u16 q0, [r0]
1690 ; CHECK-BE-NEXT: vpsel q1, q0, q2
1691 ; CHECK-BE-NEXT: vrev64.16 q0, q1
1692 ; CHECK-BE-NEXT: bx lr
1693 entry:
1694 %c = icmp sgt <8 x i16> %a, zeroinitializer
1695 %l = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %dest, i32 2, <8 x i1> %c, <8 x half> %b)
1696 ret <8 x half> %l
1697 }
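; Pre-increment: the 4-byte pointer bump folds into the predicated load as the pre-indexed form [r0, #4]!, and the updated pointer is returned.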
1699 define arm_aapcs_vfpcc i8* @masked_v8f16_preinc(i8* %x, i8* %y, <8 x i16> %a) {
1700 ; CHECK-LE-LABEL: masked_v8f16_preinc:
1701 ; CHECK-LE: @ %bb.0: @ %entry
1702 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
1703 ; CHECK-LE-NEXT: vldrht.u16 q0, [r0, #4]!
1704 ; CHECK-LE-NEXT: vstrw.32 q0, [r1]
1705 ; CHECK-LE-NEXT: bx lr
1707 ; CHECK-BE-LABEL: masked_v8f16_preinc:
1708 ; CHECK-BE: @ %bb.0: @ %entry
1709 ; CHECK-BE-NEXT: vrev64.16 q1, q0
1710 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
1711 ; CHECK-BE-NEXT: vldrht.u16 q0, [r0, #4]!
1712 ; CHECK-BE-NEXT: vstrh.16 q0, [r1]
1713 ; CHECK-BE-NEXT: bx lr
1714 entry:
1715 %z = getelementptr inbounds i8, i8* %x, i32 4
1716 %0 = bitcast i8* %z to <8 x half>*
1717 %c = icmp sgt <8 x i16> %a, zeroinitializer
1718 %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 4, <8 x i1> %c, <8 x half> undef)
1719 %2 = bitcast i8* %y to <8 x half>*
1720 store <8 x half> %1, <8 x half>* %2, align 4
1721 ret i8* %z
1722 }
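; Post-increment: the load uses the writeback form [r0], #4, so the returned pointer is the bumped base.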
1724 define arm_aapcs_vfpcc i8* @masked_v8f16_postinc(i8* %x, i8* %y, <8 x i16> %a) {
1725 ; CHECK-LE-LABEL: masked_v8f16_postinc:
1726 ; CHECK-LE: @ %bb.0: @ %entry
1727 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
1728 ; CHECK-LE-NEXT: vldrht.u16 q0, [r0], #4
1729 ; CHECK-LE-NEXT: vstrw.32 q0, [r1]
1730 ; CHECK-LE-NEXT: bx lr
1732 ; CHECK-BE-LABEL: masked_v8f16_postinc:
1733 ; CHECK-BE: @ %bb.0: @ %entry
1734 ; CHECK-BE-NEXT: vrev64.16 q1, q0
1735 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
1736 ; CHECK-BE-NEXT: vldrht.u16 q0, [r0], #4
1737 ; CHECK-BE-NEXT: vstrh.16 q0, [r1]
1738 ; CHECK-BE-NEXT: bx lr
1739 entry:
1740 %z = getelementptr inbounds i8, i8* %x, i32 4
1741 %0 = bitcast i8* %x to <8 x half>*
1742 %c = icmp sgt <8 x i16> %a, zeroinitializer
1743 %1 = call <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>* %0, i32 4, <8 x i1> %c, <8 x half> undef)
1744 %2 = bitcast i8* %y to <8 x half>*
1745 store <8 x half> %1, <8 x half>* %2, align 4
1746 ret i8* %z
1747 }
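; 64-bit elements have no MVE predicated load, so the <2 x i1> mask is built in GPRs and each doubleword is loaded conditionally, with a zero passthru.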
1750 define arm_aapcs_vfpcc <2 x i64> @masked_v2i64_align4_zero(<2 x i64> *%dest, <2 x i64> %a) {
1751 ; CHECK-LE-LABEL: masked_v2i64_align4_zero:
1752 ; CHECK-LE: @ %bb.0: @ %entry
1753 ; CHECK-LE-NEXT: .save {r7, lr}
1754 ; CHECK-LE-NEXT: push {r7, lr}
1755 ; CHECK-LE-NEXT: .pad #4
1756 ; CHECK-LE-NEXT: sub sp, #4
1757 ; CHECK-LE-NEXT: vmov r1, r2, d0
1758 ; CHECK-LE-NEXT: movs r3, #0
1759 ; CHECK-LE-NEXT: vmov lr, r12, d1
1760 ; CHECK-LE-NEXT: rsbs r1, r1, #0
1761 ; CHECK-LE-NEXT: sbcs.w r1, r3, r2
1762 ; CHECK-LE-NEXT: mov.w r1, #0
1763 ; CHECK-LE-NEXT: it lt
1764 ; CHECK-LE-NEXT: movlt r1, #1
1765 ; CHECK-LE-NEXT: rsbs.w r2, lr, #0
1766 ; CHECK-LE-NEXT: sbcs.w r2, r3, r12
1767 ; CHECK-LE-NEXT: it lt
1768 ; CHECK-LE-NEXT: movlt r3, #1
1769 ; CHECK-LE-NEXT: cmp r3, #0
1770 ; CHECK-LE-NEXT: it ne
1771 ; CHECK-LE-NEXT: mvnne r3, #1
1772 ; CHECK-LE-NEXT: bfi r3, r1, #0, #1
1773 ; CHECK-LE-NEXT: and r1, r3, #3
1774 ; CHECK-LE-NEXT: lsls r2, r3, #31
1775 ; CHECK-LE-NEXT: beq .LBB49_2
1776 ; CHECK-LE-NEXT: @ %bb.1: @ %cond.load
1777 ; CHECK-LE-NEXT: vldr d1, .LCPI49_0
1778 ; CHECK-LE-NEXT: vldr d0, [r0]
1779 ; CHECK-LE-NEXT: b .LBB49_3
1780 ; CHECK-LE-NEXT: .LBB49_2:
1781 ; CHECK-LE-NEXT: vmov.i32 q0, #0x0
1782 ; CHECK-LE-NEXT: .LBB49_3: @ %else
1783 ; CHECK-LE-NEXT: lsls r1, r1, #30
1784 ; CHECK-LE-NEXT: it mi
1785 ; CHECK-LE-NEXT: vldrmi d1, [r0, #8]
1786 ; CHECK-LE-NEXT: add sp, #4
1787 ; CHECK-LE-NEXT: pop {r7, pc}
1788 ; CHECK-LE-NEXT: .p2align 3
1789 ; CHECK-LE-NEXT: @ %bb.4:
1790 ; CHECK-LE-NEXT: .LCPI49_0:
1791 ; CHECK-LE-NEXT: .long 0 @ double 0
1792 ; CHECK-LE-NEXT: .long 0
1794 ; CHECK-BE-LABEL: masked_v2i64_align4_zero:
1795 ; CHECK-BE: @ %bb.0: @ %entry
1796 ; CHECK-BE-NEXT: .save {r7, lr}
1797 ; CHECK-BE-NEXT: push {r7, lr}
1798 ; CHECK-BE-NEXT: .pad #4
1799 ; CHECK-BE-NEXT: sub sp, #4
1800 ; CHECK-BE-NEXT: vrev64.32 q1, q0
1801 ; CHECK-BE-NEXT: movs r3, #0
1802 ; CHECK-BE-NEXT: vmov r1, r2, d3
1803 ; CHECK-BE-NEXT: vmov r12, lr, d2
1804 ; CHECK-BE-NEXT: rsbs r2, r2, #0
1805 ; CHECK-BE-NEXT: sbcs.w r1, r3, r1
1806 ; CHECK-BE-NEXT: mov.w r1, #0
1807 ; CHECK-BE-NEXT: it lt
1808 ; CHECK-BE-NEXT: movlt r1, #1
1809 ; CHECK-BE-NEXT: rsbs.w r2, lr, #0
1810 ; CHECK-BE-NEXT: sbcs.w r2, r3, r12
1811 ; CHECK-BE-NEXT: it lt
1812 ; CHECK-BE-NEXT: movlt r3, #1
1813 ; CHECK-BE-NEXT: cmp r3, #0
1814 ; CHECK-BE-NEXT: it ne
1815 ; CHECK-BE-NEXT: mvnne r3, #1
1816 ; CHECK-BE-NEXT: bfi r3, r1, #0, #1
1817 ; CHECK-BE-NEXT: and r1, r3, #3
1818 ; CHECK-BE-NEXT: lsls r2, r3, #30
1819 ; CHECK-BE-NEXT: bpl .LBB49_2
1820 ; CHECK-BE-NEXT: @ %bb.1: @ %cond.load
1821 ; CHECK-BE-NEXT: vldr d1, .LCPI49_0
1822 ; CHECK-BE-NEXT: vldr d0, [r0]
1823 ; CHECK-BE-NEXT: b .LBB49_3
1824 ; CHECK-BE-NEXT: .LBB49_2:
1825 ; CHECK-BE-NEXT: vmov.i32 q0, #0x0
1826 ; CHECK-BE-NEXT: .LBB49_3: @ %else
1827 ; CHECK-BE-NEXT: lsls r1, r1, #31
1828 ; CHECK-BE-NEXT: it ne
1829 ; CHECK-BE-NEXT: vldrne d1, [r0, #8]
1830 ; CHECK-BE-NEXT: add sp, #4
1831 ; CHECK-BE-NEXT: pop {r7, pc}
1832 ; CHECK-BE-NEXT: .p2align 3
1833 ; CHECK-BE-NEXT: @ %bb.4:
1834 ; CHECK-BE-NEXT: .LCPI49_0:
1835 ; CHECK-BE-NEXT: .long 0 @ double 0
1836 ; CHECK-BE-NEXT: .long 0
1837 entry:
1838 %c = icmp sgt <2 x i64> %a, zeroinitializer
1839 %l = call <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>* %dest, i32 8, <2 x i1> %c, <2 x i64> zeroinitializer)
1840 ret <2 x i64> %l
1841 }
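; The <2 x double> case lowers the same way: scalar compares of the <2 x i64> mask operand, a conditional vldr of each double, and a zero passthru.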
1843 define arm_aapcs_vfpcc <2 x double> @masked_v2f64_align4_zero(<2 x double> *%dest, <2 x double> %a, <2 x i64> %b) {
1844 ; CHECK-LE-LABEL: masked_v2f64_align4_zero:
1845 ; CHECK-LE: @ %bb.0: @ %entry
1846 ; CHECK-LE-NEXT: .save {r7, lr}
1847 ; CHECK-LE-NEXT: push {r7, lr}
1848 ; CHECK-LE-NEXT: .pad #4
1849 ; CHECK-LE-NEXT: sub sp, #4
1850 ; CHECK-LE-NEXT: vmov r1, r2, d2
1851 ; CHECK-LE-NEXT: movs r3, #0
1852 ; CHECK-LE-NEXT: vmov lr, r12, d3
1853 ; CHECK-LE-NEXT: rsbs r1, r1, #0
1854 ; CHECK-LE-NEXT: sbcs.w r1, r3, r2
1855 ; CHECK-LE-NEXT: mov.w r1, #0
1856 ; CHECK-LE-NEXT: it lt
1857 ; CHECK-LE-NEXT: movlt r1, #1
1858 ; CHECK-LE-NEXT: rsbs.w r2, lr, #0
1859 ; CHECK-LE-NEXT: sbcs.w r2, r3, r12
1860 ; CHECK-LE-NEXT: it lt
1861 ; CHECK-LE-NEXT: movlt r3, #1
1862 ; CHECK-LE-NEXT: cmp r3, #0
1863 ; CHECK-LE-NEXT: it ne
1864 ; CHECK-LE-NEXT: mvnne r3, #1
1865 ; CHECK-LE-NEXT: bfi r3, r1, #0, #1
1866 ; CHECK-LE-NEXT: and r1, r3, #3
1867 ; CHECK-LE-NEXT: lsls r2, r3, #31
1868 ; CHECK-LE-NEXT: beq .LBB50_2
1869 ; CHECK-LE-NEXT: @ %bb.1: @ %cond.load
1870 ; CHECK-LE-NEXT: vldr d1, .LCPI50_0
1871 ; CHECK-LE-NEXT: vldr d0, [r0]
1872 ; CHECK-LE-NEXT: b .LBB50_3
1873 ; CHECK-LE-NEXT: .LBB50_2:
1874 ; CHECK-LE-NEXT: vmov.i32 q0, #0x0
1875 ; CHECK-LE-NEXT: .LBB50_3: @ %else
1876 ; CHECK-LE-NEXT: lsls r1, r1, #30
1877 ; CHECK-LE-NEXT: it mi
1878 ; CHECK-LE-NEXT: vldrmi d1, [r0, #8]
1879 ; CHECK-LE-NEXT: add sp, #4
1880 ; CHECK-LE-NEXT: pop {r7, pc}
1881 ; CHECK-LE-NEXT: .p2align 3
1882 ; CHECK-LE-NEXT: @ %bb.4:
1883 ; CHECK-LE-NEXT: .LCPI50_0:
1884 ; CHECK-LE-NEXT: .long 0 @ double 0
1885 ; CHECK-LE-NEXT: .long 0
1887 ; CHECK-BE-LABEL: masked_v2f64_align4_zero:
1888 ; CHECK-BE: @ %bb.0: @ %entry
1889 ; CHECK-BE-NEXT: .save {r7, lr}
1890 ; CHECK-BE-NEXT: push {r7, lr}
1891 ; CHECK-BE-NEXT: .pad #4
1892 ; CHECK-BE-NEXT: sub sp, #4
1893 ; CHECK-BE-NEXT: vrev64.32 q0, q1
1894 ; CHECK-BE-NEXT: movs r3, #0
1895 ; CHECK-BE-NEXT: vmov r1, r2, d1
1896 ; CHECK-BE-NEXT: vmov r12, lr, d0
1897 ; CHECK-BE-NEXT: rsbs r2, r2, #0
1898 ; CHECK-BE-NEXT: sbcs.w r1, r3, r1
1899 ; CHECK-BE-NEXT: mov.w r1, #0
1900 ; CHECK-BE-NEXT: it lt
1901 ; CHECK-BE-NEXT: movlt r1, #1
1902 ; CHECK-BE-NEXT: rsbs.w r2, lr, #0
1903 ; CHECK-BE-NEXT: sbcs.w r2, r3, r12
1904 ; CHECK-BE-NEXT: it lt
1905 ; CHECK-BE-NEXT: movlt r3, #1
1906 ; CHECK-BE-NEXT: cmp r3, #0
1907 ; CHECK-BE-NEXT: it ne
1908 ; CHECK-BE-NEXT: mvnne r3, #1
1909 ; CHECK-BE-NEXT: bfi r3, r1, #0, #1
1910 ; CHECK-BE-NEXT: and r1, r3, #3
1911 ; CHECK-BE-NEXT: lsls r2, r3, #30
1912 ; CHECK-BE-NEXT: bpl .LBB50_2
1913 ; CHECK-BE-NEXT: @ %bb.1: @ %cond.load
1914 ; CHECK-BE-NEXT: vldr d1, .LCPI50_0
1915 ; CHECK-BE-NEXT: vldr d0, [r0]
1916 ; CHECK-BE-NEXT: b .LBB50_3
1917 ; CHECK-BE-NEXT: .LBB50_2:
1918 ; CHECK-BE-NEXT: vmov.i32 q0, #0x0
1919 ; CHECK-BE-NEXT: .LBB50_3: @ %else
1920 ; CHECK-BE-NEXT: lsls r1, r1, #31
1921 ; CHECK-BE-NEXT: it ne
1922 ; CHECK-BE-NEXT: vldrne d1, [r0, #8]
1923 ; CHECK-BE-NEXT: add sp, #4
1924 ; CHECK-BE-NEXT: pop {r7, pc}
1925 ; CHECK-BE-NEXT: .p2align 3
1926 ; CHECK-BE-NEXT: @ %bb.4:
1927 ; CHECK-BE-NEXT: .LCPI50_0:
1928 ; CHECK-BE-NEXT: .long 0 @ double 0
1929 ; CHECK-BE-NEXT: .long 0
1930 entry:
1931 %c = icmp sgt <2 x i64> %b, zeroinitializer
1932 %l = call <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>* %dest, i32 8, <2 x i1> %c, <2 x double> zeroinitializer)
1933 ret <2 x double> %l
1934 }
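; Extending loads: an any-extended <4 x i16> load kept in 32-bit lanes can use the widening, predicated vldrht.u32.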
1936 define arm_aapcs_vfpcc <4 x i16> @anyext_v4i16(<4 x i16> *%dest, <4 x i32> %a) {
1937 ; CHECK-LE-LABEL: anyext_v4i16:
1938 ; CHECK-LE: @ %bb.0: @ %entry
1939 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
1940 ; CHECK-LE-NEXT: vldrht.u32 q0, [r0]
1941 ; CHECK-LE-NEXT: bx lr
1943 ; CHECK-BE-LABEL: anyext_v4i16:
1944 ; CHECK-BE: @ %bb.0: @ %entry
1945 ; CHECK-BE-NEXT: vrev64.32 q1, q0
1946 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
1947 ; CHECK-BE-NEXT: vldrht.u32 q1, [r0]
1948 ; CHECK-BE-NEXT: vrev64.32 q0, q1
1949 ; CHECK-BE-NEXT: bx lr
1950 entry:
1951 %c = icmp sgt <4 x i32> %a, zeroinitializer
1952 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> zeroinitializer)
1953 ret <4 x i16> %l
1954 }
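; At align 1 the widening vldrht is not available, so the load is expanded into per-lane conditional ldrh/vmov sequences.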
1956 define arm_aapcs_vfpcc <4 x i16> @anyext_v4i16_align1(<4 x i16> *%dest, <4 x i32> %a) {
1957 ; CHECK-LE-LABEL: anyext_v4i16_align1:
1958 ; CHECK-LE: @ %bb.0: @ %entry
1959 ; CHECK-LE-NEXT: .pad #4
1960 ; CHECK-LE-NEXT: sub sp, #4
1961 ; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr
1962 ; CHECK-LE-NEXT: mov.w r12, #0
1963 ; CHECK-LE-NEXT: vmrs r3, p0
1964 ; CHECK-LE-NEXT: and r1, r3, #1
1965 ; CHECK-LE-NEXT: rsbs r2, r1, #0
1966 ; CHECK-LE-NEXT: movs r1, #0
1967 ; CHECK-LE-NEXT: bfi r1, r2, #0, #1
1968 ; CHECK-LE-NEXT: ubfx r2, r3, #4, #1
1969 ; CHECK-LE-NEXT: rsbs r2, r2, #0
1970 ; CHECK-LE-NEXT: bfi r1, r2, #1, #1
1971 ; CHECK-LE-NEXT: ubfx r2, r3, #8, #1
1972 ; CHECK-LE-NEXT: rsbs r2, r2, #0
1973 ; CHECK-LE-NEXT: bfi r1, r2, #2, #1
1974 ; CHECK-LE-NEXT: ubfx r2, r3, #12, #1
1975 ; CHECK-LE-NEXT: rsbs r2, r2, #0
1976 ; CHECK-LE-NEXT: bfi r1, r2, #3, #1
1977 ; CHECK-LE-NEXT: lsls r2, r1, #31
1978 ; CHECK-LE-NEXT: beq .LBB52_2
1979 ; CHECK-LE-NEXT: @ %bb.1: @ %cond.load
1980 ; CHECK-LE-NEXT: ldrh r2, [r0]
1981 ; CHECK-LE-NEXT: vdup.32 q0, r12
1982 ; CHECK-LE-NEXT: vmov.32 q0[0], r2
1983 ; CHECK-LE-NEXT: b .LBB52_3
1984 ; CHECK-LE-NEXT: .LBB52_2:
1985 ; CHECK-LE-NEXT: vmov.i32 q0, #0x0
1986 ; CHECK-LE-NEXT: .LBB52_3: @ %else
1987 ; CHECK-LE-NEXT: lsls r2, r1, #30
1988 ; CHECK-LE-NEXT: itt mi
1989 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #2]
1990 ; CHECK-LE-NEXT: vmovmi.32 q0[1], r2
1991 ; CHECK-LE-NEXT: lsls r2, r1, #29
1992 ; CHECK-LE-NEXT: itt mi
1993 ; CHECK-LE-NEXT: ldrhmi r2, [r0, #4]
1994 ; CHECK-LE-NEXT: vmovmi.32 q0[2], r2
1995 ; CHECK-LE-NEXT: lsls r1, r1, #28
1996 ; CHECK-LE-NEXT: itt mi
1997 ; CHECK-LE-NEXT: ldrhmi r0, [r0, #6]
1998 ; CHECK-LE-NEXT: vmovmi.32 q0[3], r0
1999 ; CHECK-LE-NEXT: add sp, #4
2000 ; CHECK-LE-NEXT: bx lr
2002 ; CHECK-BE-LABEL: anyext_v4i16_align1:
2003 ; CHECK-BE: @ %bb.0: @ %entry
2004 ; CHECK-BE-NEXT: .pad #4
2005 ; CHECK-BE-NEXT: sub sp, #4
2006 ; CHECK-BE-NEXT: vrev64.32 q1, q0
2007 ; CHECK-BE-NEXT: mov.w r12, #0
2008 ; CHECK-BE-NEXT: vcmp.s32 gt, q1, zr
2009 ; CHECK-BE-NEXT: vmrs r3, p0
2010 ; CHECK-BE-NEXT: ubfx r1, r3, #12, #1
2011 ; CHECK-BE-NEXT: rsbs r2, r1, #0
2012 ; CHECK-BE-NEXT: movs r1, #0
2013 ; CHECK-BE-NEXT: bfi r1, r2, #0, #1
2014 ; CHECK-BE-NEXT: ubfx r2, r3, #8, #1
2015 ; CHECK-BE-NEXT: rsbs r2, r2, #0
2016 ; CHECK-BE-NEXT: bfi r1, r2, #1, #1
2017 ; CHECK-BE-NEXT: ubfx r2, r3, #4, #1
2018 ; CHECK-BE-NEXT: rsbs r2, r2, #0
2019 ; CHECK-BE-NEXT: bfi r1, r2, #2, #1
2020 ; CHECK-BE-NEXT: and r2, r3, #1
2021 ; CHECK-BE-NEXT: rsbs r2, r2, #0
2022 ; CHECK-BE-NEXT: bfi r1, r2, #3, #1
2023 ; CHECK-BE-NEXT: lsls r2, r1, #28
2024 ; CHECK-BE-NEXT: bpl .LBB52_2
2025 ; CHECK-BE-NEXT: @ %bb.1: @ %cond.load
2026 ; CHECK-BE-NEXT: ldrh r2, [r0]
2027 ; CHECK-BE-NEXT: vdup.32 q1, r12
2028 ; CHECK-BE-NEXT: vmov.32 q1[0], r2
2029 ; CHECK-BE-NEXT: b .LBB52_3
2030 ; CHECK-BE-NEXT: .LBB52_2:
2031 ; CHECK-BE-NEXT: vmov.i32 q1, #0x0
2032 ; CHECK-BE-NEXT: .LBB52_3: @ %else
2033 ; CHECK-BE-NEXT: lsls r2, r1, #29
2034 ; CHECK-BE-NEXT: itt mi
2035 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #2]
2036 ; CHECK-BE-NEXT: vmovmi.32 q1[1], r2
2037 ; CHECK-BE-NEXT: lsls r2, r1, #30
2038 ; CHECK-BE-NEXT: itt mi
2039 ; CHECK-BE-NEXT: ldrhmi r2, [r0, #4]
2040 ; CHECK-BE-NEXT: vmovmi.32 q1[2], r2
2041 ; CHECK-BE-NEXT: lsls r1, r1, #31
2042 ; CHECK-BE-NEXT: itt ne
2043 ; CHECK-BE-NEXT: ldrhne r0, [r0, #6]
2044 ; CHECK-BE-NEXT: vmovne.32 q1[3], r0
2045 ; CHECK-BE-NEXT: vrev64.32 q0, q1
2046 ; CHECK-BE-NEXT: add sp, #4
2047 ; CHECK-BE-NEXT: bx lr
2048 entry:
2049 %c = icmp sgt <4 x i32> %a, zeroinitializer
2050 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 1, <4 x i1> %c, <4 x i16> zeroinitializer)
2051 ret <4 x i16> %l
2052 }
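; Byte loads are naturally aligned, so <4 x i8> widens with a predicated vldrbt.u32.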
2054 define arm_aapcs_vfpcc <4 x i8> @anyext_v4i8(<4 x i8> *%dest, <4 x i32> %a) {
2055 ; CHECK-LE-LABEL: anyext_v4i8:
2056 ; CHECK-LE: @ %bb.0: @ %entry
2057 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
2058 ; CHECK-LE-NEXT: vldrbt.u32 q0, [r0]
2059 ; CHECK-LE-NEXT: bx lr
2061 ; CHECK-BE-LABEL: anyext_v4i8:
2062 ; CHECK-BE: @ %bb.0: @ %entry
2063 ; CHECK-BE-NEXT: vrev64.32 q1, q0
2064 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
2065 ; CHECK-BE-NEXT: vldrbt.u32 q1, [r0]
2066 ; CHECK-BE-NEXT: vrev64.32 q0, q1
2067 ; CHECK-BE-NEXT: bx lr
2068 entry:
2069 %c = icmp sgt <4 x i32> %a, zeroinitializer
2070 %l = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %dest, i32 1, <4 x i1> %c, <4 x i8> zeroinitializer)
2071 ret <4 x i8> %l
2072 }
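; Likewise <8 x i8> widens into 16-bit lanes with a predicated vldrbt.u16.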
2074 define arm_aapcs_vfpcc <8 x i8> @anyext_v8i8(<8 x i8> *%dest, <8 x i16> %a) {
2075 ; CHECK-LE-LABEL: anyext_v8i8:
2076 ; CHECK-LE: @ %bb.0: @ %entry
2077 ; CHECK-LE-NEXT: vpt.s16 gt, q0, zr
2078 ; CHECK-LE-NEXT: vldrbt.u16 q0, [r0]
2079 ; CHECK-LE-NEXT: bx lr
2081 ; CHECK-BE-LABEL: anyext_v8i8:
2082 ; CHECK-BE: @ %bb.0: @ %entry
2083 ; CHECK-BE-NEXT: vrev64.16 q1, q0
2084 ; CHECK-BE-NEXT: vpt.s16 gt, q1, zr
2085 ; CHECK-BE-NEXT: vldrbt.u16 q1, [r0]
2086 ; CHECK-BE-NEXT: vrev64.16 q0, q1
2087 ; CHECK-BE-NEXT: bx lr
2088 entry:
2089 %c = icmp sgt <8 x i16> %a, zeroinitializer
2090 %l = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %dest, i32 1, <8 x i1> %c, <8 x i8> zeroinitializer)
2091 ret <8 x i8> %l
2092 }
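; With a second user of the narrow load (the call to @foo), the zero-extend is not folded into the load and is done afterwards with vmovlb.u16.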
2094 define arm_aapcs_vfpcc <4 x i32> @multi_user_zext(<4 x i16> *%dest, <4 x i32> %a) {
2095 ; CHECK-LE-LABEL: multi_user_zext:
2096 ; CHECK-LE: @ %bb.0: @ %entry
2097 ; CHECK-LE-NEXT: .save {r7, lr}
2098 ; CHECK-LE-NEXT: push {r7, lr}
2099 ; CHECK-LE-NEXT: .vsave {d8, d9}
2100 ; CHECK-LE-NEXT: vpush {d8, d9}
2101 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
2102 ; CHECK-LE-NEXT: vldrht.u32 q4, [r0]
2103 ; CHECK-LE-NEXT: vmov r0, r1, d8
2104 ; CHECK-LE-NEXT: vmov r2, r3, d9
2105 ; CHECK-LE-NEXT: bl foo
2106 ; CHECK-LE-NEXT: vmovlb.u16 q0, q4
2107 ; CHECK-LE-NEXT: vpop {d8, d9}
2108 ; CHECK-LE-NEXT: pop {r7, pc}
2110 ; CHECK-BE-LABEL: multi_user_zext:
2111 ; CHECK-BE: @ %bb.0: @ %entry
2112 ; CHECK-BE-NEXT: .save {r7, lr}
2113 ; CHECK-BE-NEXT: push {r7, lr}
2114 ; CHECK-BE-NEXT: .vsave {d8, d9}
2115 ; CHECK-BE-NEXT: vpush {d8, d9}
2116 ; CHECK-BE-NEXT: vrev64.32 q1, q0
2117 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
2118 ; CHECK-BE-NEXT: vldrht.u32 q4, [r0]
2119 ; CHECK-BE-NEXT: vrev64.32 q0, q4
2120 ; CHECK-BE-NEXT: vmov r1, r0, d0
2121 ; CHECK-BE-NEXT: vmov r3, r2, d1
2122 ; CHECK-BE-NEXT: bl foo
2123 ; CHECK-BE-NEXT: vmovlb.u16 q1, q4
2124 ; CHECK-BE-NEXT: vrev64.32 q0, q1
2125 ; CHECK-BE-NEXT: vpop {d8, d9}
2126 ; CHECK-BE-NEXT: pop {r7, pc}
2127 entry:
2128 %c = icmp sgt <4 x i32> %a, zeroinitializer
2129 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> zeroinitializer)
2130 call void @foo(<4 x i16> %l)
2131 %ext = zext <4 x i16> %l to <4 x i32>
2132 ret <4 x i32> %ext
2133 }
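; The sign-extend variant behaves the same: the extra use keeps the load narrow and the extend becomes a separate vmovlb.s16.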
2135 define arm_aapcs_vfpcc <4 x i32> @multi_user_sext(<4 x i16> *%dest, <4 x i32> %a) {
2136 ; CHECK-LE-LABEL: multi_user_sext:
2137 ; CHECK-LE: @ %bb.0: @ %entry
2138 ; CHECK-LE-NEXT: .save {r7, lr}
2139 ; CHECK-LE-NEXT: push {r7, lr}
2140 ; CHECK-LE-NEXT: .vsave {d8, d9}
2141 ; CHECK-LE-NEXT: vpush {d8, d9}
2142 ; CHECK-LE-NEXT: vpt.s32 gt, q0, zr
2143 ; CHECK-LE-NEXT: vldrht.u32 q4, [r0]
2144 ; CHECK-LE-NEXT: vmov r0, r1, d8
2145 ; CHECK-LE-NEXT: vmov r2, r3, d9
2146 ; CHECK-LE-NEXT: bl foo
2147 ; CHECK-LE-NEXT: vmovlb.s16 q0, q4
2148 ; CHECK-LE-NEXT: vpop {d8, d9}
2149 ; CHECK-LE-NEXT: pop {r7, pc}
2151 ; CHECK-BE-LABEL: multi_user_sext:
2152 ; CHECK-BE: @ %bb.0: @ %entry
2153 ; CHECK-BE-NEXT: .save {r7, lr}
2154 ; CHECK-BE-NEXT: push {r7, lr}
2155 ; CHECK-BE-NEXT: .vsave {d8, d9}
2156 ; CHECK-BE-NEXT: vpush {d8, d9}
2157 ; CHECK-BE-NEXT: vrev64.32 q1, q0
2158 ; CHECK-BE-NEXT: vpt.s32 gt, q1, zr
2159 ; CHECK-BE-NEXT: vldrht.u32 q4, [r0]
2160 ; CHECK-BE-NEXT: vrev64.32 q0, q4
2161 ; CHECK-BE-NEXT: vmov r1, r0, d0
2162 ; CHECK-BE-NEXT: vmov r3, r2, d1
2163 ; CHECK-BE-NEXT: bl foo
2164 ; CHECK-BE-NEXT: vmovlb.s16 q1, q4
2165 ; CHECK-BE-NEXT: vrev64.32 q0, q1
2166 ; CHECK-BE-NEXT: vpop {d8, d9}
2167 ; CHECK-BE-NEXT: pop {r7, pc}
2168 entry:
2169 %c = icmp sgt <4 x i32> %a, zeroinitializer
2170 %l = call <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>* %dest, i32 2, <4 x i1> %c, <4 x i16> zeroinitializer)
2171 call void @foo(<4 x i16> %l)
2172 %ext = sext <4 x i16> %l to <4 x i32>
2173 ret <4 x i32> %ext
2174 }
2176 declare <4 x i16> @llvm.masked.load.v4i16.p0v4i16(<4 x i16>*, i32, <4 x i1>, <4 x i16>)
2177 declare <4 x i32> @llvm.masked.load.v4i32.p0v4i32(<4 x i32>*, i32, <4 x i1>, <4 x i32>)
2178 declare <8 x i16> @llvm.masked.load.v8i16.p0v8i16(<8 x i16>*, i32, <8 x i1>, <8 x i16>)
2179 declare <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>*, i32, <4 x i1>, <4 x i8>)
2180 declare <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>*, i32, <8 x i1>, <8 x i8>)
2181 declare <16 x i8> @llvm.masked.load.v16i8.p0v16i8(<16 x i8>*, i32, <16 x i1>, <16 x i8>)
2182 declare <4 x float> @llvm.masked.load.v4f32.p0v4f32(<4 x float>*, i32, <4 x i1>, <4 x float>)
2183 declare <8 x half> @llvm.masked.load.v8f16.p0v8f16(<8 x half>*, i32, <8 x i1>, <8 x half>)
2184 declare <2 x i64> @llvm.masked.load.v2i64.p0v2i64(<2 x i64>*, i32, <2 x i1>, <2 x i64>)
2185 declare <2 x double> @llvm.masked.load.v2f64.p0v2f64(<2 x double>*, i32, <2 x i1>, <2 x double>)
2186 declare void @foo(<4 x i16>)