1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -float-abi=hard -verify-machineinstrs %s -o - | FileCheck %s
; add_f32: combines a fast fadd reduction of a v8f32 and of a v4f32 with a final
; fadd. CHECK lines (autogenerated; regenerate via update_llc_test_checks.py,
; do not hand-edit) record lowering to vector vadds plus a scalar reduction tail.
4 define float @add_f32(<8 x float> %a, <4 x float> %b) {
5 ; CHECK-LABEL: add_f32:
7 ; CHECK-NEXT: vadd.f32 q0, q0, q1
8 ; CHECK-NEXT: vadd.f32 q0, q0, q2
9 ; CHECK-NEXT: vadd.f32 s2, s2, s3
10 ; CHECK-NEXT: vadd.f32 s0, s0, s1
11 ; CHECK-NEXT: vadd.f32 s0, s0, s2
13 %r1 = call fast float @llvm.vector.reduce.fadd.f32.v8f32(float -0.0, <8 x float> %a)
14 %r2 = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %b)
15 %r = fadd fast float %r1, %r2
; fmul_f32: same pattern as add_f32 but for fast fmul reductions (identity 1.0);
; expected to merge into vector vmuls followed by a scalar reduction tail.
19 define float @fmul_f32(<8 x float> %a, <4 x float> %b) {
20 ; CHECK-LABEL: fmul_f32:
22 ; CHECK-NEXT: vmul.f32 q0, q0, q1
23 ; CHECK-NEXT: vmul.f32 q0, q0, q2
24 ; CHECK-NEXT: vmul.f32 s2, s2, s3
25 ; CHECK-NEXT: vmul.f32 s0, s0, s1
26 ; CHECK-NEXT: vmul.f32 s0, s0, s2
28 %r1 = call fast float @llvm.vector.reduce.fmul.f32.v8f32(float 1.0, <8 x float> %a)
29 %r2 = call fast float @llvm.vector.reduce.fmul.f32.v4f32(float 1.0, <4 x float> %b)
30 %r = fmul fast float %r1, %r2
; fmin_f32: two fast fmin reductions combined via llvm.minnum; lowering folds
; everything into vector vminnm ops before the final scalar vminnm tail.
34 define float @fmin_f32(<8 x float> %a, <4 x float> %b) {
35 ; CHECK-LABEL: fmin_f32:
37 ; CHECK-NEXT: vminnm.f32 q0, q0, q1
38 ; CHECK-NEXT: vminnm.f32 q0, q0, q2
39 ; CHECK-NEXT: vminnm.f32 s2, s2, s3
40 ; CHECK-NEXT: vminnm.f32 s0, s0, s1
41 ; CHECK-NEXT: vminnm.f32 s0, s0, s2
43 %r1 = call fast float @llvm.vector.reduce.fmin.v8f32(<8 x float> %a)
44 %r2 = call fast float @llvm.vector.reduce.fmin.v4f32(<4 x float> %b)
45 %r = call float @llvm.minnum.f32(float %r1, float %r2)
; fmax_f32: mirror of fmin_f32 using fmax reductions and llvm.maxnum;
; lowers to vector vmaxnm ops plus a scalar vmaxnm reduction tail.
49 define float @fmax_f32(<8 x float> %a, <4 x float> %b) {
50 ; CHECK-LABEL: fmax_f32:
52 ; CHECK-NEXT: vmaxnm.f32 q0, q0, q1
53 ; CHECK-NEXT: vmaxnm.f32 q0, q0, q2
54 ; CHECK-NEXT: vmaxnm.f32 s2, s2, s3
55 ; CHECK-NEXT: vmaxnm.f32 s0, s0, s1
56 ; CHECK-NEXT: vmaxnm.f32 s0, s0, s2
58 %r1 = call fast float @llvm.vector.reduce.fmax.v8f32(<8 x float> %a)
59 %r2 = call fast float @llvm.vector.reduce.fmax.v4f32(<4 x float> %b)
60 %r = call float @llvm.maxnum.f32(float %r1, float %r2)
; add_i32: two integer add reductions combined; MVE lowers this to a single
; vaddv followed by accumulating vaddva ops, avoiding a scalar add.
65 define i32 @add_i32(<8 x i32> %a, <4 x i32> %b) {
66 ; CHECK-LABEL: add_i32:
68 ; CHECK-NEXT: vaddv.u32 r0, q1
69 ; CHECK-NEXT: vaddva.u32 r0, q0
70 ; CHECK-NEXT: vaddva.u32 r0, q2
72 %r1 = call i32 @llvm.vector.reduce.add.i32.v8i32(<8 x i32> %a)
73 %r2 = call i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32> %b)
; add_ext_i16: zext i8->i16 feeding add reductions; the extends fold into the
; widening vaddv.u8/vaddva.u8 forms so no explicit vector extension is emitted.
78 define i16 @add_ext_i16(<16 x i8> %a, <16 x i8> %b) {
79 ; CHECK-LABEL: add_ext_i16:
81 ; CHECK-NEXT: vaddv.u8 r0, q1
82 ; CHECK-NEXT: vaddva.u8 r0, q0
84 %ae = zext <16 x i8> %a to <16 x i16>
85 %be = zext <16 x i8> %b to <16 x i16>
86 %r1 = call i16 @llvm.vector.reduce.add.i16.v16i16(<16 x i16> %ae)
87 %r2 = call i16 @llvm.vector.reduce.add.i16.v16i16(<16 x i16> %be)
; add_ext_v32i16: a 32-element zext reduction does not fit the widening vaddv
; pattern directly; current codegen spills %a to the stack and reloads it in
; v8i16 chunks via vldrb.u16, accumulating with vaddva. The CHECK lines pin
; this (suboptimal-looking) sequence so codegen changes here are visible.
92 define i16 @add_ext_v32i16(<32 x i8> %a, <16 x i8> %b) {
93 ; CHECK-LABEL: add_ext_v32i16:
95 ; CHECK-NEXT: .pad #32
96 ; CHECK-NEXT: sub sp, #32
97 ; CHECK-NEXT: mov r1, sp
98 ; CHECK-NEXT: add r2, sp, #16
99 ; CHECK-NEXT: vstrw.32 q0, [r1]
100 ; CHECK-NEXT: vstrw.32 q1, [r2]
101 ; CHECK-NEXT: vldrb.u16 q1, [r2]
102 ; CHECK-NEXT: vldrb.u16 q0, [r1]
103 ; CHECK-NEXT: vaddv.u16 r0, q1
104 ; CHECK-NEXT: vaddva.u16 r0, q0
105 ; CHECK-NEXT: vldrb.u16 q0, [r1, #8]
106 ; CHECK-NEXT: vaddva.u16 r0, q0
107 ; CHECK-NEXT: vldrb.u16 q0, [r2, #8]
108 ; CHECK-NEXT: vaddva.u16 r0, q0
109 ; CHECK-NEXT: vaddva.u8 r0, q2
110 ; CHECK-NEXT: add sp, #32
112 %ae = zext <32 x i8> %a to <32 x i16>
113 %be = zext <16 x i8> %b to <16 x i16>
114 %r1 = call i16 @llvm.vector.reduce.add.i16.v32i16(<32 x i16> %ae)
115 %r2 = call i16 @llvm.vector.reduce.add.i16.v16i16(<16 x i16> %be)
116 %r = add i16 %r1, %r2
; mul_i32: MVE has no mul reduction instruction, so the vectors are combined
; with vmul and the final v4i32 is reduced with scalar muls via vmov extracts.
120 define i32 @mul_i32(<8 x i32> %a, <4 x i32> %b) {
121 ; CHECK-LABEL: mul_i32:
123 ; CHECK-NEXT: vmul.i32 q0, q0, q1
124 ; CHECK-NEXT: vmul.i32 q0, q0, q2
125 ; CHECK-NEXT: vmov r0, r1, d1
126 ; CHECK-NEXT: vmov r2, r3, d0
127 ; CHECK-NEXT: muls r0, r1, r0
128 ; CHECK-NEXT: mul r1, r2, r3
129 ; CHECK-NEXT: muls r0, r1, r0
131 %r1 = call i32 @llvm.vector.reduce.mul.i32.v8i32(<8 x i32> %a)
132 %r2 = call i32 @llvm.vector.reduce.mul.i32.v4i32(<4 x i32> %b)
133 %r = mul i32 %r1, %r2
; and_i32: bitwise-and reductions merge into vector vand ops; the final v4i32
; is reduced with scalar ands after vmov extracts (no vandv instruction exists).
137 define i32 @and_i32(<8 x i32> %a, <4 x i32> %b) {
138 ; CHECK-LABEL: and_i32:
140 ; CHECK-NEXT: vand q0, q0, q1
141 ; CHECK-NEXT: vand q0, q0, q2
142 ; CHECK-NEXT: vmov r0, r1, d1
143 ; CHECK-NEXT: vmov r2, r3, d0
144 ; CHECK-NEXT: ands r0, r1
145 ; CHECK-NEXT: and.w r1, r2, r3
146 ; CHECK-NEXT: ands r0, r1
148 %r1 = call i32 @llvm.vector.reduce.and.i32.v8i32(<8 x i32> %a)
149 %r2 = call i32 @llvm.vector.reduce.and.i32.v4i32(<4 x i32> %b)
150 %r = and i32 %r1, %r2
; or_i32: same shape as and_i32 for the or reduction (vorr + scalar orrs tail).
154 define i32 @or_i32(<8 x i32> %a, <4 x i32> %b) {
155 ; CHECK-LABEL: or_i32:
157 ; CHECK-NEXT: vorr q0, q0, q1
158 ; CHECK-NEXT: vorr q0, q0, q2
159 ; CHECK-NEXT: vmov r0, r1, d1
160 ; CHECK-NEXT: vmov r2, r3, d0
161 ; CHECK-NEXT: orrs r0, r1
162 ; CHECK-NEXT: orr.w r1, r2, r3
163 ; CHECK-NEXT: orrs r0, r1
165 %r1 = call i32 @llvm.vector.reduce.or.i32.v8i32(<8 x i32> %a)
166 %r2 = call i32 @llvm.vector.reduce.or.i32.v4i32(<4 x i32> %b)
; xor_i32: same shape as and_i32/or_i32 for xor (veor + scalar eors tail).
171 define i32 @xor_i32(<8 x i32> %a, <4 x i32> %b) {
172 ; CHECK-LABEL: xor_i32:
174 ; CHECK-NEXT: veor q0, q0, q1
175 ; CHECK-NEXT: veor q0, q0, q2
176 ; CHECK-NEXT: vmov r0, r1, d1
177 ; CHECK-NEXT: vmov r2, r3, d0
178 ; CHECK-NEXT: eors r0, r1
179 ; CHECK-NEXT: eor.w r1, r2, r3
180 ; CHECK-NEXT: eors r0, r1
182 %r1 = call i32 @llvm.vector.reduce.xor.i32.v8i32(<8 x i32> %a)
183 %r2 = call i32 @llvm.vector.reduce.xor.i32.v4i32(<4 x i32> %b)
184 %r = xor i32 %r1, %r2
; umin_i32: the reductions and the scalar umin fold into vector vmin.u32 plus
; one vminv.u32 seeded with UINT_MAX (#-1), the identity for unsigned min.
188 define i32 @umin_i32(<8 x i32> %a, <4 x i32> %b) {
189 ; CHECK-LABEL: umin_i32:
191 ; CHECK-NEXT: vmin.u32 q0, q0, q1
192 ; CHECK-NEXT: mov.w r0, #-1
193 ; CHECK-NEXT: vmin.u32 q0, q0, q2
194 ; CHECK-NEXT: vminv.u32 r0, q0
196 %r1 = call i32 @llvm.vector.reduce.umin.i32.v8i32(<8 x i32> %a)
197 %r2 = call i32 @llvm.vector.reduce.umin.i32.v4i32(<4 x i32> %b)
198 %r = call i32 @llvm.umin.i32(i32 %r1, i32 %r2)
; umax_i32: mirror of umin_i32; vmaxv.u32 is seeded with 0, the unsigned-max
; identity.
202 define i32 @umax_i32(<8 x i32> %a, <4 x i32> %b) {
203 ; CHECK-LABEL: umax_i32:
205 ; CHECK-NEXT: vmax.u32 q0, q0, q1
206 ; CHECK-NEXT: movs r0, #0
207 ; CHECK-NEXT: vmax.u32 q0, q0, q2
208 ; CHECK-NEXT: vmaxv.u32 r0, q0
210 %r1 = call i32 @llvm.vector.reduce.umax.i32.v8i32(<8 x i32> %a)
211 %r2 = call i32 @llvm.vector.reduce.umax.i32.v4i32(<4 x i32> %b)
212 %r = call i32 @llvm.umax.i32(i32 %r1, i32 %r2)
; smin_i32: signed variant; vminv.s32 is seeded with INT_MAX
; (mvn #-2147483648 == 0x7fffffff), the signed-min identity.
216 define i32 @smin_i32(<8 x i32> %a, <4 x i32> %b) {
217 ; CHECK-LABEL: smin_i32:
219 ; CHECK-NEXT: vmin.s32 q0, q0, q1
220 ; CHECK-NEXT: mvn r0, #-2147483648
221 ; CHECK-NEXT: vmin.s32 q0, q0, q2
222 ; CHECK-NEXT: vminv.s32 r0, q0
224 %r1 = call i32 @llvm.vector.reduce.smin.i32.v8i32(<8 x i32> %a)
225 %r2 = call i32 @llvm.vector.reduce.smin.i32.v4i32(<4 x i32> %b)
226 %r = call i32 @llvm.smin.i32(i32 %r1, i32 %r2)
; smax_i32: signed variant; vmaxv.s32 is seeded with INT_MIN (#-2147483648),
; the signed-max identity.
230 define i32 @smax_i32(<8 x i32> %a, <4 x i32> %b) {
231 ; CHECK-LABEL: smax_i32:
233 ; CHECK-NEXT: vmax.s32 q0, q0, q1
234 ; CHECK-NEXT: mov.w r0, #-2147483648
235 ; CHECK-NEXT: vmax.s32 q0, q0, q2
236 ; CHECK-NEXT: vmaxv.s32 r0, q0
238 %r1 = call i32 @llvm.vector.reduce.smax.i32.v8i32(<8 x i32> %a)
239 %r2 = call i32 @llvm.vector.reduce.smax.i32.v4i32(<4 x i32> %b)
240 %r = call i32 @llvm.smax.i32(i32 %r1, i32 %r2)
; nested_add_f32: each reduction result is first combined with a scalar
; (%c / %d) before the final fadd — checks the "nested" combining pattern
; still produces pure scalar vadd.f32 chains.
244 define float @nested_add_f32(<4 x float> %a, <4 x float> %b, float %c, float %d) {
245 ; CHECK-LABEL: nested_add_f32:
247 ; CHECK-NEXT: vadd.f32 s6, s6, s7
248 ; CHECK-NEXT: vadd.f32 s4, s4, s5
249 ; CHECK-NEXT: vadd.f32 s2, s2, s3
250 ; CHECK-NEXT: vadd.f32 s0, s0, s1
251 ; CHECK-NEXT: vadd.f32 s4, s4, s6
252 ; CHECK-NEXT: vadd.f32 s0, s0, s2
253 ; CHECK-NEXT: vadd.f32 s2, s4, s9
254 ; CHECK-NEXT: vadd.f32 s0, s0, s8
255 ; CHECK-NEXT: vadd.f32 s0, s0, s2
257 %r1 = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %a)
258 %a1 = fadd fast float %r1, %c
259 %r2 = call fast float @llvm.vector.reduce.fadd.f32.v4f32(float -0.0, <4 x float> %b)
260 %a2 = fadd fast float %r2, %d
261 %r = fadd fast float %a1, %a2
; nested_mul_f32: multiplicative analogue of nested_add_f32 (identity 1.0).
265 define float @nested_mul_f32(<4 x float> %a, <4 x float> %b, float %c, float %d) {
266 ; CHECK-LABEL: nested_mul_f32:
268 ; CHECK-NEXT: vmul.f32 s6, s6, s7
269 ; CHECK-NEXT: vmul.f32 s4, s4, s5
270 ; CHECK-NEXT: vmul.f32 s2, s2, s3
271 ; CHECK-NEXT: vmul.f32 s0, s0, s1
272 ; CHECK-NEXT: vmul.f32 s4, s4, s6
273 ; CHECK-NEXT: vmul.f32 s0, s0, s2
274 ; CHECK-NEXT: vmul.f32 s2, s4, s9
275 ; CHECK-NEXT: vmul.f32 s0, s0, s8
276 ; CHECK-NEXT: vmul.f32 s0, s0, s2
278 %r1 = call fast float @llvm.vector.reduce.fmul.f32.v4f32(float 1.0, <4 x float> %a)
279 %a1 = fmul fast float %r1, %c
280 %r2 = call fast float @llvm.vector.reduce.fmul.f32.v4f32(float 1.0, <4 x float> %b)
281 %a2 = fmul fast float %r2, %d
282 %r = fmul fast float %a1, %a2
; nested_add_i32: the scalar adds of %c/%d reassociate with the reductions so
; codegen needs just one scalar add plus two accumulating vaddva ops.
286 define i32 @nested_add_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
287 ; CHECK-LABEL: nested_add_i32:
289 ; CHECK-NEXT: add r0, r1
290 ; CHECK-NEXT: vaddva.u32 r0, q0
291 ; CHECK-NEXT: vaddva.u32 r0, q1
293 %r1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %a)
294 %a1 = add i32 %r1, %c
295 %r2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %b)
296 %a2 = add i32 %r2, %d
297 %r = add i32 %a1, %a2
; nested_mul_i32: no MVE mul reduction exists, so both vectors are fully
; extracted with vmov pairs and reduced with scalar muls (needs callee-saved
; regs, hence the push/pop of r4-r8,lr).
301 define i32 @nested_mul_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
302 ; CHECK-LABEL: nested_mul_i32:
304 ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, lr}
305 ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr}
306 ; CHECK-NEXT: vmov r8, r3, d2
307 ; CHECK-NEXT: vmov r4, r5, d1
308 ; CHECK-NEXT: vmov r6, r7, d0
309 ; CHECK-NEXT: vmov r12, lr, d3
310 ; CHECK-NEXT: mul r3, r8, r3
311 ; CHECK-NEXT: muls r5, r4, r5
312 ; CHECK-NEXT: mul r2, r12, lr
313 ; CHECK-NEXT: muls r7, r6, r7
314 ; CHECK-NEXT: muls r2, r3, r2
315 ; CHECK-NEXT: mul r3, r7, r5
316 ; CHECK-NEXT: muls r1, r2, r1
317 ; CHECK-NEXT: muls r0, r3, r0
318 ; CHECK-NEXT: muls r0, r1, r0
319 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc}
320 %r1 = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> %a)
321 %a1 = mul i32 %r1, %c
322 %r2 = call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> %b)
323 %a2 = mul i32 %r2, %d
324 %r = mul i32 %a1, %a2
; nested_and_i32: like nested_mul_i32 but for and — full scalar extraction and
; reduction since there is no horizontal and instruction.
328 define i32 @nested_and_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
329 ; CHECK-LABEL: nested_and_i32:
331 ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, lr}
332 ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr}
333 ; CHECK-NEXT: vmov r2, r3, d2
334 ; CHECK-NEXT: vmov r12, lr, d3
335 ; CHECK-NEXT: vmov r8, r5, d1
336 ; CHECK-NEXT: vmov r6, r7, d0
337 ; CHECK-NEXT: ands r2, r3
338 ; CHECK-NEXT: and.w r4, r12, lr
339 ; CHECK-NEXT: ands r2, r4
340 ; CHECK-NEXT: ands r1, r2
341 ; CHECK-NEXT: and.w r2, r8, r5
342 ; CHECK-NEXT: and.w r3, r6, r7
343 ; CHECK-NEXT: ands r2, r3
344 ; CHECK-NEXT: ands r0, r2
345 ; CHECK-NEXT: ands r0, r1
346 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc}
347 %r1 = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %a)
348 %a1 = and i32 %r1, %c
349 %r2 = call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> %b)
350 %a2 = and i32 %r2, %d
351 %r = and i32 %a1, %a2
; nested_or_i32: or analogue of nested_and_i32 (scalar orrs reduction).
355 define i32 @nested_or_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
356 ; CHECK-LABEL: nested_or_i32:
358 ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, lr}
359 ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr}
360 ; CHECK-NEXT: vmov r2, r3, d2
361 ; CHECK-NEXT: vmov r12, lr, d3
362 ; CHECK-NEXT: vmov r8, r5, d1
363 ; CHECK-NEXT: vmov r6, r7, d0
364 ; CHECK-NEXT: orrs r2, r3
365 ; CHECK-NEXT: orr.w r4, r12, lr
366 ; CHECK-NEXT: orrs r2, r4
367 ; CHECK-NEXT: orrs r1, r2
368 ; CHECK-NEXT: orr.w r2, r8, r5
369 ; CHECK-NEXT: orr.w r3, r6, r7
370 ; CHECK-NEXT: orrs r2, r3
371 ; CHECK-NEXT: orrs r0, r2
372 ; CHECK-NEXT: orrs r0, r1
373 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc}
374 %r1 = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> %a)
376 %r2 = call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> %b)
; nested_xor_i32: xor analogue of nested_and_i32 (scalar eors reduction).
382 define i32 @nested_xor_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
383 ; CHECK-LABEL: nested_xor_i32:
385 ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, lr}
386 ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, lr}
387 ; CHECK-NEXT: vmov r2, r3, d2
388 ; CHECK-NEXT: vmov r12, lr, d3
389 ; CHECK-NEXT: vmov r8, r5, d1
390 ; CHECK-NEXT: vmov r6, r7, d0
391 ; CHECK-NEXT: eors r2, r3
392 ; CHECK-NEXT: eor.w r4, r12, lr
393 ; CHECK-NEXT: eors r2, r4
394 ; CHECK-NEXT: eors r1, r2
395 ; CHECK-NEXT: eor.w r2, r8, r5
396 ; CHECK-NEXT: eor.w r3, r6, r7
397 ; CHECK-NEXT: eors r2, r3
398 ; CHECK-NEXT: eors r0, r2
399 ; CHECK-NEXT: eors r0, r1
400 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, pc}
401 %r1 = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> %a)
402 %a1 = xor i32 %r1, %c
403 %r2 = call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> %b)
404 %a2 = xor i32 %r2, %d
405 %r = xor i32 %a1, %a2
; nested_smin_i32: non-fast min intrinsics — each vector reduces via
; vminv.s32 (seeded with INT_MAX) and the scalar mins become cmp+csel pairs;
; the two halves are not merged into a single vector reduction.
409 define i32 @nested_smin_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
410 ; CHECK-LABEL: nested_smin_i32:
412 ; CHECK-NEXT: mvn r3, #-2147483648
413 ; CHECK-NEXT: mvn r2, #-2147483648
414 ; CHECK-NEXT: vminv.s32 r3, q0
415 ; CHECK-NEXT: vminv.s32 r2, q1
416 ; CHECK-NEXT: cmp r3, r0
417 ; CHECK-NEXT: csel r0, r3, r0, lt
418 ; CHECK-NEXT: cmp r2, r1
419 ; CHECK-NEXT: csel r1, r2, r1, lt
420 ; CHECK-NEXT: cmp r0, r1
421 ; CHECK-NEXT: csel r0, r0, r1, lt
423 %r1 = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %a)
424 %a1 = call i32 @llvm.smin.i32(i32 %r1, i32 %c)
425 %r2 = call i32 @llvm.vector.reduce.smin.v4i32(<4 x i32> %b)
426 %a2 = call i32 @llvm.smin.i32(i32 %r2, i32 %d)
427 %r = call i32 @llvm.smin.i32(i32 %a1, i32 %a2)
; nested_smax_i32: mirror of nested_smin_i32 — vmaxv.s32 seeded with INT_MIN,
; scalar maxes as cmp+csel with gt.
431 define i32 @nested_smax_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
432 ; CHECK-LABEL: nested_smax_i32:
434 ; CHECK-NEXT: mov.w r3, #-2147483648
435 ; CHECK-NEXT: mov.w r2, #-2147483648
436 ; CHECK-NEXT: vmaxv.s32 r3, q0
437 ; CHECK-NEXT: vmaxv.s32 r2, q1
438 ; CHECK-NEXT: cmp r3, r0
439 ; CHECK-NEXT: csel r0, r3, r0, gt
440 ; CHECK-NEXT: cmp r2, r1
441 ; CHECK-NEXT: csel r1, r2, r1, gt
442 ; CHECK-NEXT: cmp r0, r1
443 ; CHECK-NEXT: csel r0, r0, r1, gt
445 %r1 = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %a)
446 %a1 = call i32 @llvm.smax.i32(i32 %r1, i32 %c)
447 %r2 = call i32 @llvm.vector.reduce.smax.v4i32(<4 x i32> %b)
448 %a2 = call i32 @llvm.smax.i32(i32 %r2, i32 %d)
449 %r = call i32 @llvm.smax.i32(i32 %a1, i32 %a2)
; nested_umin_i32: unsigned variant — vminv.u32 seeded with UINT_MAX (#-1),
; scalar mins as cmp+csel with lo.
453 define i32 @nested_umin_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
454 ; CHECK-LABEL: nested_umin_i32:
456 ; CHECK-NEXT: mov.w r3, #-1
457 ; CHECK-NEXT: mov.w r2, #-1
458 ; CHECK-NEXT: vminv.u32 r3, q0
459 ; CHECK-NEXT: vminv.u32 r2, q1
460 ; CHECK-NEXT: cmp r3, r0
461 ; CHECK-NEXT: csel r0, r3, r0, lo
462 ; CHECK-NEXT: cmp r2, r1
463 ; CHECK-NEXT: csel r1, r2, r1, lo
464 ; CHECK-NEXT: cmp r0, r1
465 ; CHECK-NEXT: csel r0, r0, r1, lo
467 %r1 = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %a)
468 %a1 = call i32 @llvm.umin.i32(i32 %r1, i32 %c)
469 %r2 = call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> %b)
470 %a2 = call i32 @llvm.umin.i32(i32 %r2, i32 %d)
471 %r = call i32 @llvm.umin.i32(i32 %a1, i32 %a2)
; nested_umax_i32: unsigned variant — vmaxv.u32 seeded with 0, scalar maxes as
; cmp+csel with hi.
475 define i32 @nested_umax_i32(<4 x i32> %a, <4 x i32> %b, i32 %c, i32 %d) {
476 ; CHECK-LABEL: nested_umax_i32:
478 ; CHECK-NEXT: movs r3, #0
479 ; CHECK-NEXT: movs r2, #0
480 ; CHECK-NEXT: vmaxv.u32 r3, q0
481 ; CHECK-NEXT: vmaxv.u32 r2, q1
482 ; CHECK-NEXT: cmp r3, r0
483 ; CHECK-NEXT: csel r0, r3, r0, hi
484 ; CHECK-NEXT: cmp r2, r1
485 ; CHECK-NEXT: csel r1, r2, r1, hi
486 ; CHECK-NEXT: cmp r0, r1
487 ; CHECK-NEXT: csel r0, r0, r1, hi
489 %r1 = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %a)
490 %a1 = call i32 @llvm.umax.i32(i32 %r1, i32 %c)
491 %r2 = call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> %b)
492 %a2 = call i32 @llvm.umax.i32(i32 %r2, i32 %d)
493 %r = call i32 @llvm.umax.i32(i32 %a1, i32 %a2)
; nested_fmin_float: non-fast fmin reductions nested with minnum of %c/%d;
; everything lowers to scalar vminnm.f32 chains (no vector combine of a and b,
; since the scalars interleave between the two reductions).
497 define float @nested_fmin_float(<4 x float> %a, <4 x float> %b, float %c, float %d) {
498 ; CHECK-LABEL: nested_fmin_float:
500 ; CHECK-NEXT: vminnm.f32 s2, s2, s3
501 ; CHECK-NEXT: vminnm.f32 s0, s0, s1
502 ; CHECK-NEXT: vminnm.f32 s0, s0, s2
503 ; CHECK-NEXT: vminnm.f32 s2, s6, s7
504 ; CHECK-NEXT: vminnm.f32 s4, s4, s5
505 ; CHECK-NEXT: vminnm.f32 s0, s0, s8
506 ; CHECK-NEXT: vminnm.f32 s2, s4, s2
507 ; CHECK-NEXT: vminnm.f32 s2, s2, s9
508 ; CHECK-NEXT: vminnm.f32 s0, s0, s2
510 %r1 = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> %a)
511 %a1 = call float @llvm.minnum.f32(float %r1, float %c)
512 %r2 = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> %b)
513 %a2 = call float @llvm.minnum.f32(float %r2, float %d)
514 %r = call float @llvm.minnum.f32(float %a1, float %a2)
; nested_fmax_float: mirror of nested_fmin_float using fmax/maxnum (vmaxnm).
518 define float @nested_fmax_float(<4 x float> %a, <4 x float> %b, float %c, float %d) {
519 ; CHECK-LABEL: nested_fmax_float:
521 ; CHECK-NEXT: vmaxnm.f32 s2, s2, s3
522 ; CHECK-NEXT: vmaxnm.f32 s0, s0, s1
523 ; CHECK-NEXT: vmaxnm.f32 s0, s0, s2
524 ; CHECK-NEXT: vmaxnm.f32 s2, s6, s7
525 ; CHECK-NEXT: vmaxnm.f32 s4, s4, s5
526 ; CHECK-NEXT: vmaxnm.f32 s0, s0, s8
527 ; CHECK-NEXT: vmaxnm.f32 s2, s4, s2
528 ; CHECK-NEXT: vmaxnm.f32 s2, s2, s9
529 ; CHECK-NEXT: vmaxnm.f32 s0, s0, s2
531 %r1 = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> %a)
532 %a1 = call float @llvm.maxnum.f32(float %r1, float %c)
533 %r2 = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> %b)
534 %a2 = call float @llvm.maxnum.f32(float %r2, float %d)
535 %r = call float @llvm.maxnum.f32(float %a1, float %a2)
; Intrinsic declarations used by the tests above.
; NOTE(review): the nested_* tests call un-suffixed names such as
; @llvm.vector.reduce.add.v4i32 that are not declared here — presumably
; declared elsewhere in the file or auto-recognized as intrinsics; confirm
; against the full file before relying on this list being complete.
539 declare float @llvm.vector.reduce.fadd.f32.v8f32(float, <8 x float>)
540 declare float @llvm.vector.reduce.fadd.f32.v4f32(float, <4 x float>)
541 declare float @llvm.vector.reduce.fmul.f32.v8f32(float, <8 x float>)
542 declare float @llvm.vector.reduce.fmul.f32.v4f32(float, <4 x float>)
543 declare float @llvm.vector.reduce.fmin.v8f32(<8 x float>)
544 declare float @llvm.vector.reduce.fmin.v4f32(<4 x float>)
545 declare float @llvm.vector.reduce.fmax.v8f32(<8 x float>)
546 declare float @llvm.vector.reduce.fmax.v4f32(<4 x float>)
547 declare i32 @llvm.vector.reduce.add.i32.v8i32(<8 x i32>)
548 declare i32 @llvm.vector.reduce.add.i32.v4i32(<4 x i32>)
549 declare i16 @llvm.vector.reduce.add.i16.v32i16(<32 x i16>)
550 declare i16 @llvm.vector.reduce.add.i16.v16i16(<16 x i16>)
551 declare i32 @llvm.vector.reduce.mul.i32.v8i32(<8 x i32>)
552 declare i32 @llvm.vector.reduce.mul.i32.v4i32(<4 x i32>)
553 declare i32 @llvm.vector.reduce.and.i32.v8i32(<8 x i32>)
554 declare i32 @llvm.vector.reduce.and.i32.v4i32(<4 x i32>)
555 declare i32 @llvm.vector.reduce.or.i32.v8i32(<8 x i32>)
556 declare i32 @llvm.vector.reduce.or.i32.v4i32(<4 x i32>)
557 declare i32 @llvm.vector.reduce.xor.i32.v8i32(<8 x i32>)
558 declare i32 @llvm.vector.reduce.xor.i32.v4i32(<4 x i32>)
559 declare i32 @llvm.vector.reduce.umin.i32.v8i32(<8 x i32>)
560 declare i32 @llvm.vector.reduce.umin.i32.v4i32(<4 x i32>)
561 declare i32 @llvm.vector.reduce.umax.i32.v8i32(<8 x i32>)
562 declare i32 @llvm.vector.reduce.umax.i32.v4i32(<4 x i32>)
563 declare i32 @llvm.vector.reduce.smin.i32.v8i32(<8 x i32>)
564 declare i32 @llvm.vector.reduce.smin.i32.v4i32(<4 x i32>)
565 declare i32 @llvm.vector.reduce.smax.i32.v8i32(<8 x i32>)
566 declare i32 @llvm.vector.reduce.smax.i32.v4i32(<4 x i32>)
567 declare float @llvm.minnum.f32(float, float)
568 declare float @llvm.maxnum.f32(float, float)
569 declare i32 @llvm.umin.i32(i32, i32)
570 declare i32 @llvm.umax.i32(i32, i32)
571 declare i32 @llvm.smin.i32(i32, i32)
572 declare i32 @llvm.smax.i32(i32, i32)