; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp %s -o - | FileCheck %s

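; Lowering of the signed three-way compare intrinsic (llvm.scmp) to MVE.
; Each result lane is 1 when a > b, all-ones (-1) when a < b and 0 otherwise.
; For 128-bit legal types this becomes two vcmp.sNN gt / vpsel steps selecting
; between splats of #0x1, #0x0 and #0xff; narrower vectors (v8i8, v4i16) are
; first sign-extended in-register with vmovlb.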
define arm_aapcs_vfpcc <8 x i8> @s_v8i8(<8 x i8> %a, <8 x i8> %b) {
; CHECK-LABEL: s_v8i8:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlb.s8 q1, q1
; CHECK-NEXT:    vmovlb.s8 q0, q0
; CHECK-NEXT:    vmov.i32 q2, #0x0
; CHECK-NEXT:    vmov.i16 q3, #0x1
; CHECK-NEXT:    vcmp.s16 gt, q0, q1
; CHECK-NEXT:    vpsel q2, q3, q2
; CHECK-NEXT:    vmov.i8 q3, #0xff
; CHECK-NEXT:    vcmp.s16 gt, q1, q0
; CHECK-NEXT:    vpsel q0, q3, q2
; CHECK-NEXT:    bx lr
entry:
  %c = call <8 x i8> @llvm.scmp(<8 x i8> %a, <8 x i8> %b)
  ret <8 x i8> %c
}

define arm_aapcs_vfpcc <16 x i8> @s_v16i8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: s_v16i8:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmov.i32 q2, #0x0
; CHECK-NEXT:    vmov.i8 q3, #0x1
; CHECK-NEXT:    vcmp.s8 gt, q0, q1
; CHECK-NEXT:    vpsel q2, q3, q2
; CHECK-NEXT:    vmov.i8 q3, #0xff
; CHECK-NEXT:    vcmp.s8 gt, q1, q0
; CHECK-NEXT:    vpsel q0, q3, q2
; CHECK-NEXT:    bx lr
entry:
  %c = call <16 x i8> @llvm.scmp(<16 x i8> %a, <16 x i8> %b)
  ret <16 x i8> %c
}

define arm_aapcs_vfpcc <4 x i16> @s_v4i16(<4 x i16> %a, <4 x i16> %b) {
; CHECK-LABEL: s_v4i16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmovlb.s16 q1, q1
; CHECK-NEXT:    vmovlb.s16 q0, q0
; CHECK-NEXT:    vmov.i32 q2, #0x0
; CHECK-NEXT:    vmov.i32 q3, #0x1
; CHECK-NEXT:    vcmp.s32 gt, q0, q1
; CHECK-NEXT:    vpsel q2, q3, q2
; CHECK-NEXT:    vmov.i8 q3, #0xff
; CHECK-NEXT:    vcmp.s32 gt, q1, q0
; CHECK-NEXT:    vpsel q0, q3, q2
; CHECK-NEXT:    bx lr
entry:
  %c = call <4 x i16> @llvm.scmp(<4 x i16> %a, <4 x i16> %b)
  ret <4 x i16> %c
}

define arm_aapcs_vfpcc <8 x i16> @s_v8i16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: s_v8i16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmov.i32 q2, #0x0
; CHECK-NEXT:    vmov.i16 q3, #0x1
; CHECK-NEXT:    vcmp.s16 gt, q0, q1
; CHECK-NEXT:    vpsel q2, q3, q2
; CHECK-NEXT:    vmov.i8 q3, #0xff
; CHECK-NEXT:    vcmp.s16 gt, q1, q0
; CHECK-NEXT:    vpsel q0, q3, q2
; CHECK-NEXT:    bx lr
entry:
  %c = call <8 x i16> @llvm.scmp(<8 x i16> %a, <8 x i16> %b)
  ret <8 x i16> %c
}

define arm_aapcs_vfpcc <16 x i16> @s_v16i16(<16 x i16> %a, <16 x i16> %b) {
; CHECK-LABEL: s_v16i16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    vmov.i32 q4, #0x0
; CHECK-NEXT:    vmov.i16 q5, #0x1
; CHECK-NEXT:    vcmp.s16 gt, q0, q2
; CHECK-NEXT:    vmov.i8 q7, #0xff
; CHECK-NEXT:    vpsel q6, q5, q4
; CHECK-NEXT:    vcmp.s16 gt, q2, q0
; CHECK-NEXT:    vpsel q0, q7, q6
; CHECK-NEXT:    vcmp.s16 gt, q1, q3
; CHECK-NEXT:    vpsel q2, q5, q4
; CHECK-NEXT:    vcmp.s16 gt, q3, q1
; CHECK-NEXT:    vpsel q1, q7, q2
; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    bx lr
entry:
  %c = call <16 x i16> @llvm.scmp(<16 x i16> %a, <16 x i16> %b)
  ret <16 x i16> %c
}

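; For 64-bit lanes (and v2i32, which is widened to 64-bit lanes via asr #31
; sign extension), there is no MVE vector compare, so each lane is compared
; with scalar subs/sbcs + csetm, the lane masks are packed into a GPR with
; bfi, and the result is moved into p0 with vmsr before the vpsel selects.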
define arm_aapcs_vfpcc <2 x i32> @s_v2i32(<2 x i32> %a, <2 x i32> %b) {
; CHECK-LABEL: s_v2i32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r6, lr}
; CHECK-NEXT:    push {r4, r5, r6, lr}
; CHECK-NEXT:    vmov r2, s4
; CHECK-NEXT:    adr.w r12, .LCPI5_0
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    vldrw.u32 q3, [r12]
; CHECK-NEXT:    vmov r0, s6
; CHECK-NEXT:    movs r6, #0
; CHECK-NEXT:    vmov r4, s2
; CHECK-NEXT:    vmov.i32 q2, #0x0
; CHECK-NEXT:    vmov.i8 q1, #0xff
; CHECK-NEXT:    subs r3, r2, r1
; CHECK-NEXT:    asr.w lr, r2, #31
; CHECK-NEXT:    sbcs.w r3, lr, r1, asr #31
; CHECK-NEXT:    csetm r12, lt
; CHECK-NEXT:    movs r3, #0
; CHECK-NEXT:    subs r5, r0, r4
; CHECK-NEXT:    bfi r3, r12, #0, #8
; CHECK-NEXT:    asr.w r12, r0, #31
; CHECK-NEXT:    sbcs.w r5, r12, r4, asr #31
; CHECK-NEXT:    csetm r5, lt
; CHECK-NEXT:    bfi r3, r5, #8, #8
; CHECK-NEXT:    vmsr p0, r3
; CHECK-NEXT:    asrs r3, r1, #31
; CHECK-NEXT:    subs r1, r1, r2
; CHECK-NEXT:    vpsel q0, q3, q2
; CHECK-NEXT:    sbcs.w r1, r3, r2, asr #31
; CHECK-NEXT:    csetm r1, lt
; CHECK-NEXT:    subs r2, r4, r0
; CHECK-NEXT:    bfi r6, r1, #0, #8
; CHECK-NEXT:    asr.w r1, r4, #31
; CHECK-NEXT:    sbcs.w r0, r1, r0, asr #31
; CHECK-NEXT:    csetm r0, lt
; CHECK-NEXT:    bfi r6, r0, #8, #8
; CHECK-NEXT:    vmsr p0, r6
; CHECK-NEXT:    vpsel q0, q1, q0
; CHECK-NEXT:    pop {r4, r5, r6, pc}
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI5_0:
; CHECK-NEXT:    .long 1 @ 0x1
; CHECK-NEXT:    .long 0 @ 0x0
; CHECK-NEXT:    .long 1 @ 0x1
; CHECK-NEXT:    .long 0 @ 0x0
entry:
  %c = call <2 x i32> @llvm.scmp(<2 x i32> %a, <2 x i32> %b)
  ret <2 x i32> %c
}

define arm_aapcs_vfpcc <4 x i32> @s_v4i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: s_v4i32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vmov.i32 q2, #0x0
; CHECK-NEXT:    vmov.i32 q3, #0x1
; CHECK-NEXT:    vcmp.s32 gt, q0, q1
; CHECK-NEXT:    vpsel q2, q3, q2
; CHECK-NEXT:    vmov.i8 q3, #0xff
; CHECK-NEXT:    vcmp.s32 gt, q1, q0
; CHECK-NEXT:    vpsel q0, q3, q2
; CHECK-NEXT:    bx lr
entry:
  %c = call <4 x i32> @llvm.scmp(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %c
}

define arm_aapcs_vfpcc <8 x i32> @s_v8i32(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: s_v8i32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    vmov.i32 q4, #0x0
; CHECK-NEXT:    vmov.i32 q5, #0x1
; CHECK-NEXT:    vcmp.s32 gt, q0, q2
; CHECK-NEXT:    vmov.i8 q7, #0xff
; CHECK-NEXT:    vpsel q6, q5, q4
; CHECK-NEXT:    vcmp.s32 gt, q2, q0
; CHECK-NEXT:    vpsel q0, q7, q6
; CHECK-NEXT:    vcmp.s32 gt, q1, q3
; CHECK-NEXT:    vpsel q2, q5, q4
; CHECK-NEXT:    vcmp.s32 gt, q3, q1
; CHECK-NEXT:    vpsel q1, q7, q2
; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    bx lr
entry:
  %c = call <8 x i32> @llvm.scmp(<8 x i32> %a, <8 x i32> %b)
  ret <8 x i32> %c
}

define arm_aapcs_vfpcc <2 x i64> @s_v2i64(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: s_v2i64:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, lr}
; CHECK-NEXT:    vmov lr, r12, d0
; CHECK-NEXT:    movs r4, #0
; CHECK-NEXT:    vmov r3, r8, d2
; CHECK-NEXT:    movs r0, #0
; CHECK-NEXT:    vmov r6, r7, d3
; CHECK-NEXT:    vmov.i32 q1, #0x0
; CHECK-NEXT:    subs.w r1, r3, lr
; CHECK-NEXT:    sbcs.w r1, r8, r12
; CHECK-NEXT:    csetm r1, lt
; CHECK-NEXT:    bfi r4, r1, #0, #8
; CHECK-NEXT:    vmov r1, r5, d1
; CHECK-NEXT:    subs r2, r6, r1
; CHECK-NEXT:    sbcs.w r2, r7, r5
; CHECK-NEXT:    csetm r2, lt
; CHECK-NEXT:    bfi r4, r2, #8, #8
; CHECK-NEXT:    adr r2, .LCPI8_0
; CHECK-NEXT:    vldrw.u32 q0, [r2]
; CHECK-NEXT:    subs.w r2, lr, r3
; CHECK-NEXT:    sbcs.w r2, r12, r8
; CHECK-NEXT:    vmsr p0, r4
; CHECK-NEXT:    csetm r2, lt
; CHECK-NEXT:    subs r1, r1, r6
; CHECK-NEXT:    sbcs.w r1, r5, r7
; CHECK-NEXT:    bfi r0, r2, #0, #8
; CHECK-NEXT:    csetm r1, lt
; CHECK-NEXT:    vpsel q0, q0, q1
; CHECK-NEXT:    bfi r0, r1, #8, #8
; CHECK-NEXT:    vmov.i8 q1, #0xff
; CHECK-NEXT:    vmsr p0, r0
; CHECK-NEXT:    vpsel q0, q1, q0
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, pc}
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI8_0:
; CHECK-NEXT:    .long 1 @ 0x1
; CHECK-NEXT:    .long 0 @ 0x0
; CHECK-NEXT:    .long 1 @ 0x1
; CHECK-NEXT:    .long 0 @ 0x0
entry:
  %c = call <2 x i64> @llvm.scmp(<2 x i64> %a, <2 x i64> %b)
  ret <2 x i64> %c
}

define arm_aapcs_vfpcc <4 x i64> @s_v4i64(<4 x i64> %a, <4 x i64> %b) {
; CHECK-LABEL: s_v4i64:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r4, r5, r6, r7, r8, r9, lr}
; CHECK-NEXT:    push.w {r4, r5, r6, r7, r8, r9, lr}
; CHECK-NEXT:    .pad #4
; CHECK-NEXT:    sub sp, #4
; CHECK-NEXT:    .vsave {d8, d9, d10, d11}
; CHECK-NEXT:    vpush {d8, d9, d10, d11}
; CHECK-NEXT:    vmov lr, r12, d0
; CHECK-NEXT:    movs r4, #0
; CHECK-NEXT:    vmov r3, r8, d4
; CHECK-NEXT:    vmov.i32 q5, #0x0
; CHECK-NEXT:    vmov r6, r7, d5
; CHECK-NEXT:    mov.w r9, #0
; CHECK-NEXT:    vmov.i8 q2, #0xff
; CHECK-NEXT:    subs.w r1, r3, lr
; CHECK-NEXT:    sbcs.w r1, r8, r12
; CHECK-NEXT:    csetm r1, lt
; CHECK-NEXT:    bfi r4, r1, #0, #8
; CHECK-NEXT:    vmov r1, r5, d1
; CHECK-NEXT:    subs r2, r6, r1
; CHECK-NEXT:    sbcs.w r2, r7, r5
; CHECK-NEXT:    csetm r2, lt
; CHECK-NEXT:    bfi r4, r2, #8, #8
; CHECK-NEXT:    adr r2, .LCPI9_0
; CHECK-NEXT:    vldrw.u32 q4, [r2]
; CHECK-NEXT:    subs.w r2, lr, r3
; CHECK-NEXT:    sbcs.w r2, r12, r8
; CHECK-NEXT:    mov.w r3, #0
; CHECK-NEXT:    csetm r2, lt
; CHECK-NEXT:    subs r1, r1, r6
; CHECK-NEXT:    sbcs.w r1, r5, r7
; CHECK-NEXT:    bfi r3, r2, #0, #8
; CHECK-NEXT:    csetm r1, lt
; CHECK-NEXT:    vmsr p0, r4
; CHECK-NEXT:    bfi r3, r1, #8, #8
; CHECK-NEXT:    vpsel q0, q4, q5
; CHECK-NEXT:    vmsr p0, r3
; CHECK-NEXT:    vmov lr, r12, d2
; CHECK-NEXT:    vmov r3, r7, d6
; CHECK-NEXT:    movs r5, #0
; CHECK-NEXT:    vmov r2, r1, d7
; CHECK-NEXT:    vpsel q0, q2, q0
; CHECK-NEXT:    subs.w r6, r3, lr
; CHECK-NEXT:    sbcs.w r6, r7, r12
; CHECK-NEXT:    csetm r6, lt
; CHECK-NEXT:    bfi r5, r6, #0, #8
; CHECK-NEXT:    vmov r6, r4, d3
; CHECK-NEXT:    subs r0, r2, r6
; CHECK-NEXT:    sbcs.w r0, r1, r4
; CHECK-NEXT:    csetm r0, lt
; CHECK-NEXT:    bfi r5, r0, #8, #8
; CHECK-NEXT:    subs.w r0, lr, r3
; CHECK-NEXT:    sbcs.w r0, r12, r7
; CHECK-NEXT:    vmsr p0, r5
; CHECK-NEXT:    csetm r0, lt
; CHECK-NEXT:    vpsel q1, q4, q5
; CHECK-NEXT:    bfi r9, r0, #0, #8
; CHECK-NEXT:    subs r0, r6, r2
; CHECK-NEXT:    sbcs.w r0, r4, r1
; CHECK-NEXT:    csetm r0, lt
; CHECK-NEXT:    bfi r9, r0, #8, #8
; CHECK-NEXT:    vmsr p0, r9
; CHECK-NEXT:    vpsel q1, q2, q1
; CHECK-NEXT:    vpop {d8, d9, d10, d11}
; CHECK-NEXT:    add sp, #4
; CHECK-NEXT:    pop.w {r4, r5, r6, r7, r8, r9, pc}
; CHECK-NEXT:    .p2align 4
; CHECK-NEXT:  @ %bb.1:
; CHECK-NEXT:  .LCPI9_0:
; CHECK-NEXT:    .long 1 @ 0x1
; CHECK-NEXT:    .long 0 @ 0x0
; CHECK-NEXT:    .long 1 @ 0x1
; CHECK-NEXT:    .long 0 @ 0x0
entry:
  %c = call <4 x i64> @llvm.scmp(<4 x i64> %a, <4 x i64> %b)
  ret <4 x i64> %c
}

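; scmp on a v16i16 built by concatenating two v8i16 halves, producing v16i8:
; each half is compared and selected as v8i16, then the two results are
; narrowed to bytes and joined through a stack slot (vstrb.16 stores,
; vldrw.u32 reload).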
define arm_aapcs_vfpcc <16 x i8> @signOf_neon(<8 x i16> %s0_lo, <8 x i16> %s0_hi, <8 x i16> %s1_lo, <8 x i16> %s1_hi) {
; CHECK-LABEL: signOf_neon:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    vpush {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    .pad #16
; CHECK-NEXT:    sub sp, #16
; CHECK-NEXT:    vmov.i32 q4, #0x0
; CHECK-NEXT:    vmov.i16 q5, #0x1
; CHECK-NEXT:    vcmp.s16 gt, q1, q3
; CHECK-NEXT:    vmov.i8 q7, #0xff
; CHECK-NEXT:    vpsel q6, q5, q4
; CHECK-NEXT:    vcmp.s16 gt, q3, q1
; CHECK-NEXT:    vpsel q1, q7, q6
; CHECK-NEXT:    vcmp.s16 gt, q0, q2
; CHECK-NEXT:    vpsel q3, q5, q4
; CHECK-NEXT:    vcmp.s16 gt, q2, q0
; CHECK-NEXT:    mov r0, sp
; CHECK-NEXT:    vpsel q0, q7, q3
; CHECK-NEXT:    vstrb.16 q1, [r0, #8]
; CHECK-NEXT:    vstrb.16 q0, [r0]
; CHECK-NEXT:    vldrw.u32 q0, [r0]
; CHECK-NEXT:    add sp, #16
; CHECK-NEXT:    vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT:    bx lr
entry:
  %0 = shufflevector <8 x i16> %s0_lo, <8 x i16> %s0_hi, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %1 = shufflevector <8 x i16> %s1_lo, <8 x i16> %s1_hi, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  %or.i = tail call <16 x i8> @llvm.scmp.v16i8.v16i16(<16 x i16> %0, <16 x i16> %1)
  ret <16 x i8> %or.i
}