; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s
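
; Test lowering of fadd, fmin and fmax reductions (llvm.vector.reduce.*) on
; fixed-length vector types. Reductions with the reassoc flag may use the
; unordered vfredusum.vs; plain fadd reductions must use the ordered
; vfredosum.vs.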

declare half @llvm.vector.reduce.fadd.v1f16(half, <1 x half>)

define half @vreduce_fadd_v1f16(ptr %x, half %s) {
; CHECK-LABEL: vreduce_fadd_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.f.s fa5, v8
; CHECK-NEXT:    fadd.h fa0, fa0, fa5
; CHECK-NEXT:    ret
  %v = load <1 x half>, ptr %x
  %red = call reassoc half @llvm.vector.reduce.fadd.v1f16(half %s, <1 x half> %v)
  ret half %red
}

define half @vreduce_ord_fadd_v1f16(ptr %x, half %s) {
; CHECK-LABEL: vreduce_ord_fadd_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vfredosum.vs v8, v8, v9
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <1 x half>, ptr %x
  %red = call half @llvm.vector.reduce.fadd.v1f16(half %s, <1 x half> %v)
  ret half %red
}

declare half @llvm.vector.reduce.fadd.v2f16(half, <2 x half>)

define half @vreduce_fadd_v2f16(ptr %x, half %s) {
; CHECK-LABEL: vreduce_fadd_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vfredusum.vs v8, v8, v9
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <2 x half>, ptr %x
  %red = call reassoc half @llvm.vector.reduce.fadd.v2f16(half %s, <2 x half> %v)
  ret half %red
}

define half @vreduce_ord_fadd_v2f16(ptr %x, half %s) {
; CHECK-LABEL: vreduce_ord_fadd_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vfredosum.vs v8, v8, v9
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <2 x half>, ptr %x
  %red = call half @llvm.vector.reduce.fadd.v2f16(half %s, <2 x half> %v)
  ret half %red
}

declare half @llvm.vector.reduce.fadd.v4f16(half, <4 x half>)

define half @vreduce_fadd_v4f16(ptr %x, half %s) {
; CHECK-LABEL: vreduce_fadd_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vfredusum.vs v8, v8, v9
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x half>, ptr %x
  %red = call reassoc half @llvm.vector.reduce.fadd.v4f16(half %s, <4 x half> %v)
  ret half %red
}

define half @vreduce_ord_fadd_v4f16(ptr %x, half %s) {
; CHECK-LABEL: vreduce_ord_fadd_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vfredosum.vs v8, v8, v9
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x half>, ptr %x
  %red = call half @llvm.vector.reduce.fadd.v4f16(half %s, <4 x half> %v)
  ret half %red
}

declare half @llvm.vector.reduce.fadd.v8f16(half, <8 x half>)

define half @vreduce_fadd_v8f16(ptr %x, half %s) {
; CHECK-LABEL: vreduce_fadd_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vfredusum.vs v8, v8, v9
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <8 x half>, ptr %x
  %red = call reassoc half @llvm.vector.reduce.fadd.v8f16(half %s, <8 x half> %v)
  ret half %red
}

define half @vreduce_ord_fadd_v8f16(ptr %x, half %s) {
; CHECK-LABEL: vreduce_ord_fadd_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vfredosum.vs v8, v8, v9
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <8 x half>, ptr %x
  %red = call half @llvm.vector.reduce.fadd.v8f16(half %s, <8 x half> %v)
  ret half %red
}

declare half @llvm.vector.reduce.fadd.v16f16(half, <16 x half>)

define half @vreduce_fadd_v16f16(ptr %x, half %s) {
; CHECK-LABEL: vreduce_fadd_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v10, fa0
; CHECK-NEXT:    vfredusum.vs v8, v8, v10
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <16 x half>, ptr %x
  %red = call reassoc half @llvm.vector.reduce.fadd.v16f16(half %s, <16 x half> %v)
  ret half %red
}

define half @vreduce_ord_fadd_v16f16(ptr %x, half %s) {
; CHECK-LABEL: vreduce_ord_fadd_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v10, fa0
; CHECK-NEXT:    vfredosum.vs v8, v8, v10
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <16 x half>, ptr %x
  %red = call half @llvm.vector.reduce.fadd.v16f16(half %s, <16 x half> %v)
  ret half %red
}

declare half @llvm.vector.reduce.fadd.v32f16(half, <32 x half>)

define half @vreduce_fadd_v32f16(ptr %x, half %s) {
; CHECK-LABEL: vreduce_fadd_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v12, fa0
; CHECK-NEXT:    vfredusum.vs v8, v8, v12
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <32 x half>, ptr %x
  %red = call reassoc half @llvm.vector.reduce.fadd.v32f16(half %s, <32 x half> %v)
  ret half %red
}

define half @vreduce_ord_fadd_v32f16(ptr %x, half %s) {
; CHECK-LABEL: vreduce_ord_fadd_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v12, fa0
; CHECK-NEXT:    vfredosum.vs v8, v8, v12
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <32 x half>, ptr %x
  %red = call half @llvm.vector.reduce.fadd.v32f16(half %s, <32 x half> %v)
  ret half %red
}

declare half @llvm.vector.reduce.fadd.v64f16(half, <64 x half>)

define half @vreduce_fadd_v64f16(ptr %x, half %s) {
; CHECK-LABEL: vreduce_fadd_v64f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    vfredusum.vs v8, v8, v16
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <64 x half>, ptr %x
  %red = call reassoc half @llvm.vector.reduce.fadd.v64f16(half %s, <64 x half> %v)
  ret half %red
}

define half @vreduce_ord_fadd_v64f16(ptr %x, half %s) {
; CHECK-LABEL: vreduce_ord_fadd_v64f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    vfredosum.vs v8, v8, v16
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <64 x half>, ptr %x
  %red = call half @llvm.vector.reduce.fadd.v64f16(half %s, <64 x half> %v)
  ret half %red
}
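
; Vectors too long for a single LMUL=8 register group are split. An unordered
; reduction can combine the halves with vfadd.vv before a single vfredusum.vs;
; an ordered reduction must chain two vfredosum.vs.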

declare half @llvm.vector.reduce.fadd.v128f16(half, <128 x half>)

define half @vreduce_fadd_v128f16(ptr %x, half %s) {
; CHECK-LABEL: vreduce_fadd_v128f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle16.v v16, (a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v16
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    vfredusum.vs v8, v8, v16
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <128 x half>, ptr %x
  %red = call reassoc half @llvm.vector.reduce.fadd.v128f16(half %s, <128 x half> %v)
  ret half %red
}

define half @vreduce_ord_fadd_v128f16(ptr %x, half %s) {
; CHECK-LABEL: vreduce_ord_fadd_v128f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, 128
; CHECK-NEXT:    li a2, 64
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vle16.v v16, (a1)
; CHECK-NEXT:    vfmv.s.f v24, fa0
; CHECK-NEXT:    vfredosum.vs v8, v8, v24
; CHECK-NEXT:    vfredosum.vs v8, v16, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <128 x half>, ptr %x
  %red = call half @llvm.vector.reduce.fadd.v128f16(half %s, <128 x half> %v)
  ret half %red
}
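
; Test widening reductions: an fadd reduction of the fpext of a narrower
; vector can use vfwredusum.vs/vfwredosum.vs on the narrow source instead of
; extending the whole vector first.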

declare float @llvm.vector.reduce.fadd.v1f32(float, <1 x float>)

define float @vreduce_fadd_v1f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_fadd_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfmv.f.s fa5, v8
; CHECK-NEXT:    fadd.s fa0, fa0, fa5
; CHECK-NEXT:    ret
  %v = load <1 x float>, ptr %x
  %red = call reassoc float @llvm.vector.reduce.fadd.v1f32(float %s, <1 x float> %v)
  ret float %red
}

define float @vreduce_ord_fadd_v1f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_ord_fadd_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vfredosum.vs v8, v8, v9
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <1 x float>, ptr %x
  %red = call float @llvm.vector.reduce.fadd.v1f32(float %s, <1 x float> %v)
  ret float %red
}

define float @vreduce_fwadd_v1f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_fwadd_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfmv.f.s fa5, v9
; CHECK-NEXT:    fadd.s fa0, fa0, fa5
; CHECK-NEXT:    ret
  %v = load <1 x half>, ptr %x
  %e = fpext <1 x half> %v to <1 x float>
  %red = call reassoc float @llvm.vector.reduce.fadd.v1f32(float %s, <1 x float> %e)
  ret float %red
}

define float @vreduce_ord_fwadd_v1f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_ord_fwadd_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfwredosum.vs v8, v8, v9
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <1 x half>, ptr %x
  %e = fpext <1 x half> %v to <1 x float>
  %red = call float @llvm.vector.reduce.fadd.v1f32(float %s, <1 x float> %e)
  ret float %red
}

declare float @llvm.vector.reduce.fadd.v2f32(float, <2 x float>)

define float @vreduce_fadd_v2f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_fadd_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vfredusum.vs v8, v8, v9
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <2 x float>, ptr %x
  %red = call reassoc float @llvm.vector.reduce.fadd.v2f32(float %s, <2 x float> %v)
  ret float %red
}

define float @vreduce_ord_fadd_v2f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_ord_fadd_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vfredosum.vs v8, v8, v9
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <2 x float>, ptr %x
  %red = call float @llvm.vector.reduce.fadd.v2f32(float %s, <2 x float> %v)
  ret float %red
}

define float @vreduce_fwadd_v2f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_fwadd_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfwredusum.vs v8, v8, v9
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <2 x half>, ptr %x
  %e = fpext <2 x half> %v to <2 x float>
  %red = call reassoc float @llvm.vector.reduce.fadd.v2f32(float %s, <2 x float> %e)
  ret float %red
}

define float @vreduce_ord_fwadd_v2f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_ord_fwadd_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT:    vfwredosum.vs v8, v8, v9
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <2 x half>, ptr %x
  %e = fpext <2 x half> %v to <2 x float>
  %red = call float @llvm.vector.reduce.fadd.v2f32(float %s, <2 x float> %e)
  ret float %red
}

declare float @llvm.vector.reduce.fadd.v4f32(float, <4 x float>)

define float @vreduce_fadd_v4f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_fadd_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vfredusum.vs v8, v8, v9
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x float>, ptr %x
  %red = call reassoc float @llvm.vector.reduce.fadd.v4f32(float %s, <4 x float> %v)
  ret float %red
}

define float @vreduce_ord_fadd_v4f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_ord_fadd_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vfredosum.vs v8, v8, v9
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x float>, ptr %x
  %red = call float @llvm.vector.reduce.fadd.v4f32(float %s, <4 x float> %v)
  ret float %red
}

define float @vreduce_fwadd_v4f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_fwadd_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfwredusum.vs v8, v8, v9
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x half>, ptr %x
  %e = fpext <4 x half> %v to <4 x float>
  %red = call reassoc float @llvm.vector.reduce.fadd.v4f32(float %s, <4 x float> %e)
  ret float %red
}

define float @vreduce_ord_fwadd_v4f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_ord_fwadd_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vfwredosum.vs v8, v8, v9
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x half>, ptr %x
  %e = fpext <4 x half> %v to <4 x float>
  %red = call float @llvm.vector.reduce.fadd.v4f32(float %s, <4 x float> %e)
  ret float %red
}

declare float @llvm.vector.reduce.fadd.v8f32(float, <8 x float>)

define float @vreduce_fadd_v8f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_fadd_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v10, fa0
; CHECK-NEXT:    vfredusum.vs v8, v8, v10
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <8 x float>, ptr %x
  %red = call reassoc float @llvm.vector.reduce.fadd.v8f32(float %s, <8 x float> %v)
  ret float %red
}

define float @vreduce_ord_fadd_v8f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_ord_fadd_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v10, fa0
; CHECK-NEXT:    vfredosum.vs v8, v8, v10
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <8 x float>, ptr %x
  %red = call float @llvm.vector.reduce.fadd.v8f32(float %s, <8 x float> %v)
  ret float %red
}

define float @vreduce_fwadd_v8f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_fwadd_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfwredusum.vs v8, v8, v9
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <8 x half>, ptr %x
  %e = fpext <8 x half> %v to <8 x float>
  %red = call reassoc float @llvm.vector.reduce.fadd.v8f32(float %s, <8 x float> %e)
  ret float %red
}

define float @vreduce_ord_fwadd_v8f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_ord_fwadd_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT:    vfwredosum.vs v8, v8, v9
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <8 x half>, ptr %x
  %e = fpext <8 x half> %v to <8 x float>
  %red = call float @llvm.vector.reduce.fadd.v8f32(float %s, <8 x float> %e)
  ret float %red
}

declare float @llvm.vector.reduce.fadd.v16f32(float, <16 x float>)

define float @vreduce_fadd_v16f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_fadd_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v12, fa0
; CHECK-NEXT:    vfredusum.vs v8, v8, v12
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <16 x float>, ptr %x
  %red = call reassoc float @llvm.vector.reduce.fadd.v16f32(float %s, <16 x float> %v)
  ret float %red
}

define float @vreduce_ord_fadd_v16f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_ord_fadd_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v12, fa0
; CHECK-NEXT:    vfredosum.vs v8, v8, v12
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <16 x float>, ptr %x
  %red = call float @llvm.vector.reduce.fadd.v16f32(float %s, <16 x float> %v)
  ret float %red
}

define float @vreduce_fwadd_v16f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_fwadd_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v10, fa0
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfwredusum.vs v8, v8, v10
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <16 x half>, ptr %x
  %e = fpext <16 x half> %v to <16 x float>
  %red = call reassoc float @llvm.vector.reduce.fadd.v16f32(float %s, <16 x float> %e)
  ret float %red
}

define float @vreduce_ord_fwadd_v16f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_ord_fwadd_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v10, fa0
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfwredosum.vs v8, v8, v10
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <16 x half>, ptr %x
  %e = fpext <16 x half> %v to <16 x float>
  %red = call float @llvm.vector.reduce.fadd.v16f32(float %s, <16 x float> %e)
  ret float %red
}

declare float @llvm.vector.reduce.fadd.v32f32(float, <32 x float>)

define float @vreduce_fadd_v32f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_fadd_v32f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    vfredusum.vs v8, v8, v16
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <32 x float>, ptr %x
  %red = call reassoc float @llvm.vector.reduce.fadd.v32f32(float %s, <32 x float> %v)
  ret float %red
}

define float @vreduce_ord_fadd_v32f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_ord_fadd_v32f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    vfredosum.vs v8, v8, v16
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <32 x float>, ptr %x
  %red = call float @llvm.vector.reduce.fadd.v32f32(float %s, <32 x float> %v)
  ret float %red
}

define float @vreduce_fwadd_v32f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_fwadd_v32f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v12, fa0
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfwredusum.vs v8, v8, v12
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <32 x half>, ptr %x
  %e = fpext <32 x half> %v to <32 x float>
  %red = call reassoc float @llvm.vector.reduce.fadd.v32f32(float %s, <32 x float> %e)
  ret float %red
}

define float @vreduce_ord_fwadd_v32f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_ord_fwadd_v32f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v12, fa0
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT:    vfwredosum.vs v8, v8, v12
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <32 x half>, ptr %x
  %e = fpext <32 x half> %v to <32 x float>
  %red = call float @llvm.vector.reduce.fadd.v32f32(float %s, <32 x float> %e)
  ret float %red
}

declare float @llvm.vector.reduce.fadd.v64f32(float, <64 x float>)

define float @vreduce_fadd_v64f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_fadd_v64f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle32.v v16, (a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v16
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    vfredusum.vs v8, v8, v16
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <64 x float>, ptr %x
  %red = call reassoc float @llvm.vector.reduce.fadd.v64f32(float %s, <64 x float> %v)
  ret float %red
}

define float @vreduce_ord_fadd_v64f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_ord_fadd_v64f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, 128
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vle32.v v16, (a1)
; CHECK-NEXT:    vfmv.s.f v24, fa0
; CHECK-NEXT:    vfredosum.vs v8, v8, v24
; CHECK-NEXT:    vfredosum.vs v8, v16, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <64 x float>, ptr %x
  %red = call float @llvm.vector.reduce.fadd.v64f32(float %s, <64 x float> %v)
  ret float %red
}

define float @vreduce_fwadd_v64f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_fwadd_v64f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vslidedown.vx v16, v8, a0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfwadd.vv v24, v8, v16
; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    vfredusum.vs v8, v24, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <64 x half>, ptr %x
  %e = fpext <64 x half> %v to <64 x float>
  %red = call reassoc float @llvm.vector.reduce.fadd.v64f32(float %s, <64 x float> %e)
  ret float %red
}

define float @vreduce_ord_fwadd_v64f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_ord_fwadd_v64f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vslidedown.vx v16, v8, a0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v12, fa0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfwredosum.vs v8, v8, v12
; CHECK-NEXT:    vfwredosum.vs v8, v16, v8
; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <64 x half>, ptr %x
  %e = fpext <64 x half> %v to <64 x float>
  %red = call float @llvm.vector.reduce.fadd.v64f32(float %s, <64 x float> %e)
  ret float %red
}

declare double @llvm.vector.reduce.fadd.v1f64(double, <1 x double>)

define double @vreduce_fadd_v1f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_fadd_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfmv.f.s fa5, v8
; CHECK-NEXT:    fadd.d fa0, fa0, fa5
; CHECK-NEXT:    ret
  %v = load <1 x double>, ptr %x
  %red = call reassoc double @llvm.vector.reduce.fadd.v1f64(double %s, <1 x double> %v)
  ret double %red
}

define double @vreduce_ord_fadd_v1f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_ord_fadd_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vfredosum.vs v8, v8, v9
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <1 x double>, ptr %x
  %red = call double @llvm.vector.reduce.fadd.v1f64(double %s, <1 x double> %v)
  ret double %red
}

define double @vreduce_fwadd_v1f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_fwadd_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfwcvt.f.f.v v9, v8
; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa5, v9
; CHECK-NEXT:    fadd.d fa0, fa0, fa5
; CHECK-NEXT:    ret
  %v = load <1 x float>, ptr %x
  %e = fpext <1 x float> %v to <1 x double>
  %red = call reassoc double @llvm.vector.reduce.fadd.v1f64(double %s, <1 x double> %e)
  ret double %red
}

define double @vreduce_ord_fwadd_v1f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_ord_fwadd_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwredosum.vs v8, v8, v9
; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <1 x float>, ptr %x
  %e = fpext <1 x float> %v to <1 x double>
  %red = call double @llvm.vector.reduce.fadd.v1f64(double %s, <1 x double> %e)
  ret double %red
}

declare double @llvm.vector.reduce.fadd.v2f64(double, <2 x double>)

define double @vreduce_fadd_v2f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_fadd_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vfredusum.vs v8, v8, v9
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <2 x double>, ptr %x
  %red = call reassoc double @llvm.vector.reduce.fadd.v2f64(double %s, <2 x double> %v)
  ret double %red
}

define double @vreduce_ord_fadd_v2f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_ord_fadd_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vfredosum.vs v8, v8, v9
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <2 x double>, ptr %x
  %red = call double @llvm.vector.reduce.fadd.v2f64(double %s, <2 x double> %v)
  ret double %red
}

define double @vreduce_fwadd_v2f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_fwadd_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwredusum.vs v8, v8, v9
; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <2 x float>, ptr %x
  %e = fpext <2 x float> %v to <2 x double>
  %red = call reassoc double @llvm.vector.reduce.fadd.v2f64(double %s, <2 x double> %e)
  ret double %red
}

define double @vreduce_ord_fwadd_v2f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_ord_fwadd_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfwredosum.vs v8, v8, v9
; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <2 x float>, ptr %x
  %e = fpext <2 x float> %v to <2 x double>
  %red = call double @llvm.vector.reduce.fadd.v2f64(double %s, <2 x double> %e)
  ret double %red
}

declare double @llvm.vector.reduce.fadd.v4f64(double, <4 x double>)

define double @vreduce_fadd_v4f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_fadd_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v10, fa0
; CHECK-NEXT:    vfredusum.vs v8, v8, v10
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x double>, ptr %x
  %red = call reassoc double @llvm.vector.reduce.fadd.v4f64(double %s, <4 x double> %v)
  ret double %red
}

define double @vreduce_ord_fadd_v4f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_ord_fadd_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v10, fa0
; CHECK-NEXT:    vfredosum.vs v8, v8, v10
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x double>, ptr %x
  %red = call double @llvm.vector.reduce.fadd.v4f64(double %s, <4 x double> %v)
  ret double %red
}

define double @vreduce_fwadd_v4f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_fwadd_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfwredusum.vs v8, v8, v9
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x float>, ptr %x
  %e = fpext <4 x float> %v to <4 x double>
  %red = call reassoc double @llvm.vector.reduce.fadd.v4f64(double %s, <4 x double> %e)
  ret double %red
}

define double @vreduce_ord_fwadd_v4f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_ord_fwadd_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfwredosum.vs v8, v8, v9
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x float>, ptr %x
  %e = fpext <4 x float> %v to <4 x double>
  %red = call double @llvm.vector.reduce.fadd.v4f64(double %s, <4 x double> %e)
  ret double %red
}

declare double @llvm.vector.reduce.fadd.v8f64(double, <8 x double>)

define double @vreduce_fadd_v8f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_fadd_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v12, fa0
; CHECK-NEXT:    vfredusum.vs v8, v8, v12
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <8 x double>, ptr %x
  %red = call reassoc double @llvm.vector.reduce.fadd.v8f64(double %s, <8 x double> %v)
  ret double %red
}

define double @vreduce_ord_fadd_v8f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_ord_fadd_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v12, fa0
; CHECK-NEXT:    vfredosum.vs v8, v8, v12
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <8 x double>, ptr %x
  %red = call double @llvm.vector.reduce.fadd.v8f64(double %s, <8 x double> %v)
  ret double %red
}

define double @vreduce_fwadd_v8f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_fwadd_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v10, fa0
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfwredusum.vs v8, v8, v10
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <8 x float>, ptr %x
  %e = fpext <8 x float> %v to <8 x double>
  %red = call reassoc double @llvm.vector.reduce.fadd.v8f64(double %s, <8 x double> %e)
  ret double %red
}

define double @vreduce_ord_fwadd_v8f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_ord_fwadd_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v10, fa0
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfwredosum.vs v8, v8, v10
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <8 x float>, ptr %x
  %e = fpext <8 x float> %v to <8 x double>
  %red = call double @llvm.vector.reduce.fadd.v8f64(double %s, <8 x double> %e)
  ret double %red
}

declare double @llvm.vector.reduce.fadd.v16f64(double, <16 x double>)

define double @vreduce_fadd_v16f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_fadd_v16f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    vfredusum.vs v8, v8, v16
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <16 x double>, ptr %x
  %red = call reassoc double @llvm.vector.reduce.fadd.v16f64(double %s, <16 x double> %v)
  ret double %red
}

define double @vreduce_ord_fadd_v16f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_ord_fadd_v16f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    vfredosum.vs v8, v8, v16
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <16 x double>, ptr %x
  %red = call double @llvm.vector.reduce.fadd.v16f64(double %s, <16 x double> %v)
  ret double %red
}

define double @vreduce_fwadd_v16f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_fwadd_v16f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v12, fa0
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfwredusum.vs v8, v8, v12
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <16 x float>, ptr %x
  %e = fpext <16 x float> %v to <16 x double>
  %red = call reassoc double @llvm.vector.reduce.fadd.v16f64(double %s, <16 x double> %e)
  ret double %red
}

define double @vreduce_ord_fwadd_v16f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_ord_fwadd_v16f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v12, fa0
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfwredosum.vs v8, v8, v12
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <16 x float>, ptr %x
  %e = fpext <16 x float> %v to <16 x double>
  %red = call double @llvm.vector.reduce.fadd.v16f64(double %s, <16 x double> %e)
  ret double %red
}

declare double @llvm.vector.reduce.fadd.v32f64(double, <32 x double>)

define double @vreduce_fadd_v32f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_fadd_v32f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle64.v v16, (a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v16
; CHECK-NEXT:    vfmv.s.f v16, fa0
; CHECK-NEXT:    vfredusum.vs v8, v8, v16
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <32 x double>, ptr %x
  %red = call reassoc double @llvm.vector.reduce.fadd.v32f64(double %s, <32 x double> %v)
  ret double %red
}

define double @vreduce_ord_fadd_v32f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_ord_fadd_v32f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, 128
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vle64.v v16, (a1)
; CHECK-NEXT:    vfmv.s.f v24, fa0
; CHECK-NEXT:    vfredosum.vs v8, v8, v24
; CHECK-NEXT:    vfredosum.vs v8, v16, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <32 x double>, ptr %x
  %red = call double @llvm.vector.reduce.fadd.v32f64(double %s, <32 x double> %v)
  ret double %red
}

define double @vreduce_fwadd_v32f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_fwadd_v32f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT:    vslidedown.vi v16, v8, 16
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vfwadd.vv v24, v8, v16
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    vfredusum.vs v8, v24, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <32 x float>, ptr %x
  %e = fpext <32 x float> %v to <32 x double>
  %red = call reassoc double @llvm.vector.reduce.fadd.v32f64(double %s, <32 x double> %e)
  ret double %red
}

define double @vreduce_ord_fwadd_v32f64(ptr %x, double %s) {
; CHECK-LABEL: vreduce_ord_fwadd_v32f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT:    vslidedown.vi v16, v8, 16
; CHECK-NEXT:    vsetivli zero, 16, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.s.f v12, fa0
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT:    vfwredosum.vs v8, v8, v12
; CHECK-NEXT:    vfwredosum.vs v8, v16, v8
; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <32 x float>, ptr %x
  %e = fpext <32 x float> %v to <32 x double>
  %red = call double @llvm.vector.reduce.fadd.v32f64(double %s, <32 x double> %e)
  ret double %red
}
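
; Test fmin reductions. The nonans/noinfs variants lower to the same
; vfredmin.vs sequence as the default case.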

declare half @llvm.vector.reduce.fmin.v2f16(<2 x half>)

define half @vreduce_fmin_v2f16(ptr %x) {
; CHECK-LABEL: vreduce_fmin_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfredmin.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <2 x half>, ptr %x
  %red = call half @llvm.vector.reduce.fmin.v2f16(<2 x half> %v)
  ret half %red
}

declare half @llvm.vector.reduce.fmin.v4f16(<4 x half>)

define half @vreduce_fmin_v4f16(ptr %x) {
; CHECK-LABEL: vreduce_fmin_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfredmin.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x half>, ptr %x
  %red = call half @llvm.vector.reduce.fmin.v4f16(<4 x half> %v)
  ret half %red
}

define half @vreduce_fmin_v4f16_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fmin_v4f16_nonans:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfredmin.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x half>, ptr %x
  %red = call nnan half @llvm.vector.reduce.fmin.v4f16(<4 x half> %v)
  ret half %red
}

define half @vreduce_fmin_v4f16_nonans_noinfs(ptr %x) {
; CHECK-LABEL: vreduce_fmin_v4f16_nonans_noinfs:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfredmin.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x half>, ptr %x
  %red = call nnan ninf half @llvm.vector.reduce.fmin.v4f16(<4 x half> %v)
  ret half %red
}

declare half @llvm.vector.reduce.fmin.v128f16(<128 x half>)

define half @vreduce_fmin_v128f16(ptr %x) {
; CHECK-LABEL: vreduce_fmin_v128f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle16.v v16, (a0)
; CHECK-NEXT:    vfmin.vv v8, v8, v16
; CHECK-NEXT:    vfredmin.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <128 x half>, ptr %x
  %red = call half @llvm.vector.reduce.fmin.v128f16(<128 x half> %v)
  ret half %red
}

declare float @llvm.vector.reduce.fmin.v2f32(<2 x float>)

define float @vreduce_fmin_v2f32(ptr %x) {
; CHECK-LABEL: vreduce_fmin_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfredmin.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <2 x float>, ptr %x
  %red = call float @llvm.vector.reduce.fmin.v2f32(<2 x float> %v)
  ret float %red
}

declare float @llvm.vector.reduce.fmin.v4f32(<4 x float>)

define float @vreduce_fmin_v4f32(ptr %x) {
; CHECK-LABEL: vreduce_fmin_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfredmin.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x float>, ptr %x
  %red = call float @llvm.vector.reduce.fmin.v4f32(<4 x float> %v)
  ret float %red
}

define float @vreduce_fmin_v4f32_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fmin_v4f32_nonans:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfredmin.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x float>, ptr %x
  %red = call nnan float @llvm.vector.reduce.fmin.v4f32(<4 x float> %v)
  ret float %red
}

define float @vreduce_fmin_v4f32_nonans_noinfs(ptr %x) {
; CHECK-LABEL: vreduce_fmin_v4f32_nonans_noinfs:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfredmin.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x float>, ptr %x
  %red = call nnan ninf float @llvm.vector.reduce.fmin.v4f32(<4 x float> %v)
  ret float %red
}

declare float @llvm.vector.reduce.fmin.v128f32(<128 x float>)

define float @vreduce_fmin_v128f32(ptr %x) {
; CHECK-LABEL: vreduce_fmin_v128f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    addi a1, a0, 384
; CHECK-NEXT:    vle32.v v16, (a1)
; CHECK-NEXT:    addi a1, a0, 256
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle32.v v24, (a0)
; CHECK-NEXT:    vle32.v v0, (a1)
; CHECK-NEXT:    vfmin.vv v16, v24, v16
; CHECK-NEXT:    vfmin.vv v8, v8, v0
; CHECK-NEXT:    vfmin.vv v8, v8, v16
; CHECK-NEXT:    vfredmin.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <128 x float>, ptr %x
  %red = call float @llvm.vector.reduce.fmin.v128f32(<128 x float> %v)
  ret float %red
}

declare double @llvm.vector.reduce.fmin.v2f64(<2 x double>)

define double @vreduce_fmin_v2f64(ptr %x) {
; CHECK-LABEL: vreduce_fmin_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfredmin.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <2 x double>, ptr %x
  %red = call double @llvm.vector.reduce.fmin.v2f64(<2 x double> %v)
  ret double %red
}

declare double @llvm.vector.reduce.fmin.v4f64(<4 x double>)

define double @vreduce_fmin_v4f64(ptr %x) {
; CHECK-LABEL: vreduce_fmin_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfredmin.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x double>, ptr %x
  %red = call double @llvm.vector.reduce.fmin.v4f64(<4 x double> %v)
  ret double %red
}

define double @vreduce_fmin_v4f64_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fmin_v4f64_nonans:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfredmin.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x double>, ptr %x
  %red = call nnan double @llvm.vector.reduce.fmin.v4f64(<4 x double> %v)
  ret double %red
}

define double @vreduce_fmin_v4f64_nonans_noinfs(ptr %x) {
; CHECK-LABEL: vreduce_fmin_v4f64_nonans_noinfs:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfredmin.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x double>, ptr %x
  %red = call nnan ninf double @llvm.vector.reduce.fmin.v4f64(<4 x double> %v)
  ret double %red
}

declare double @llvm.vector.reduce.fmin.v32f64(<32 x double>)

define double @vreduce_fmin_v32f64(ptr %x) {
; CHECK-LABEL: vreduce_fmin_v32f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle64.v v16, (a0)
; CHECK-NEXT:    vfmin.vv v8, v8, v16
; CHECK-NEXT:    vfredmin.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <32 x double>, ptr %x
  %red = call double @llvm.vector.reduce.fmin.v32f64(<32 x double> %v)
  ret double %red
}
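
; Test fmax reductions. As with fmin, the fast-math-flag variants do not
; change the vfredmax.vs lowering.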

declare half @llvm.vector.reduce.fmax.v2f16(<2 x half>)

define half @vreduce_fmax_v2f16(ptr %x) {
; CHECK-LABEL: vreduce_fmax_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfredmax.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <2 x half>, ptr %x
  %red = call half @llvm.vector.reduce.fmax.v2f16(<2 x half> %v)
  ret half %red
}

declare half @llvm.vector.reduce.fmax.v4f16(<4 x half>)

define half @vreduce_fmax_v4f16(ptr %x) {
; CHECK-LABEL: vreduce_fmax_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfredmax.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x half>, ptr %x
  %red = call half @llvm.vector.reduce.fmax.v4f16(<4 x half> %v)
  ret half %red
}

define half @vreduce_fmax_v4f16_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fmax_v4f16_nonans:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfredmax.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x half>, ptr %x
  %red = call nnan half @llvm.vector.reduce.fmax.v4f16(<4 x half> %v)
  ret half %red
}

define half @vreduce_fmax_v4f16_nonans_noinfs(ptr %x) {
; CHECK-LABEL: vreduce_fmax_v4f16_nonans_noinfs:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vfredmax.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x half>, ptr %x
  %red = call nnan ninf half @llvm.vector.reduce.fmax.v4f16(<4 x half> %v)
  ret half %red
}

declare half @llvm.vector.reduce.fmax.v128f16(<128 x half>)

define half @vreduce_fmax_v128f16(ptr %x) {
; CHECK-LABEL: vreduce_fmax_v128f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 64
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle16.v v16, (a0)
; CHECK-NEXT:    vfmax.vv v8, v8, v16
; CHECK-NEXT:    vfredmax.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <128 x half>, ptr %x
  %red = call half @llvm.vector.reduce.fmax.v128f16(<128 x half> %v)
  ret half %red
}

declare float @llvm.vector.reduce.fmax.v2f32(<2 x float>)

define float @vreduce_fmax_v2f32(ptr %x) {
; CHECK-LABEL: vreduce_fmax_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfredmax.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <2 x float>, ptr %x
  %red = call float @llvm.vector.reduce.fmax.v2f32(<2 x float> %v)
  ret float %red
}

declare float @llvm.vector.reduce.fmax.v4f32(<4 x float>)

define float @vreduce_fmax_v4f32(ptr %x) {
; CHECK-LABEL: vreduce_fmax_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfredmax.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x float>, ptr %x
  %red = call float @llvm.vector.reduce.fmax.v4f32(<4 x float> %v)
  ret float %red
}

define float @vreduce_fmax_v4f32_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fmax_v4f32_nonans:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfredmax.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x float>, ptr %x
  %red = call nnan float @llvm.vector.reduce.fmax.v4f32(<4 x float> %v)
  ret float %red
}

define float @vreduce_fmax_v4f32_nonans_noinfs(ptr %x) {
; CHECK-LABEL: vreduce_fmax_v4f32_nonans_noinfs:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfredmax.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x float>, ptr %x
  %red = call nnan ninf float @llvm.vector.reduce.fmax.v4f32(<4 x float> %v)
  ret float %red
}

declare float @llvm.vector.reduce.fmax.v128f32(<128 x float>)

define float @vreduce_fmax_v128f32(ptr %x) {
; CHECK-LABEL: vreduce_fmax_v128f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    addi a1, a0, 384
; CHECK-NEXT:    vle32.v v16, (a1)
; CHECK-NEXT:    addi a1, a0, 256
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle32.v v24, (a0)
; CHECK-NEXT:    vle32.v v0, (a1)
; CHECK-NEXT:    vfmax.vv v16, v24, v16
; CHECK-NEXT:    vfmax.vv v8, v8, v0
; CHECK-NEXT:    vfmax.vv v8, v8, v16
; CHECK-NEXT:    vfredmax.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <128 x float>, ptr %x
  %red = call float @llvm.vector.reduce.fmax.v128f32(<128 x float> %v)
  ret float %red
}

declare double @llvm.vector.reduce.fmax.v2f64(<2 x double>)

define double @vreduce_fmax_v2f64(ptr %x) {
; CHECK-LABEL: vreduce_fmax_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfredmax.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <2 x double>, ptr %x
  %red = call double @llvm.vector.reduce.fmax.v2f64(<2 x double> %v)
  ret double %red
}

declare double @llvm.vector.reduce.fmax.v4f64(<4 x double>)

define double @vreduce_fmax_v4f64(ptr %x) {
; CHECK-LABEL: vreduce_fmax_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfredmax.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x double>, ptr %x
  %red = call double @llvm.vector.reduce.fmax.v4f64(<4 x double> %v)
  ret double %red
}

define double @vreduce_fmax_v4f64_nonans(ptr %x) {
; CHECK-LABEL: vreduce_fmax_v4f64_nonans:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfredmax.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x double>, ptr %x
  %red = call nnan double @llvm.vector.reduce.fmax.v4f64(<4 x double> %v)
  ret double %red
}

define double @vreduce_fmax_v4f64_nonans_noinfs(ptr %x) {
; CHECK-LABEL: vreduce_fmax_v4f64_nonans_noinfs:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vfredmax.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x double>, ptr %x
  %red = call nnan ninf double @llvm.vector.reduce.fmax.v4f64(<4 x double> %v)
  ret double %red
}

declare double @llvm.vector.reduce.fmax.v32f64(<32 x double>)

define double @vreduce_fmax_v32f64(ptr %x) {
; CHECK-LABEL: vreduce_fmax_v32f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle64.v v16, (a0)
; CHECK-NEXT:    vfmax.vv v8, v8, v16
; CHECK-NEXT:    vfredmax.vs v8, v8, v8
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <32 x double>, ptr %x
  %red = call double @llvm.vector.reduce.fmax.v32f64(<32 x double> %v)
  ret double %red
}
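
; The nsz flag on a reassoc fadd reduction does not change the lowering; the
; code matches the plain reassoc vreduce_fadd_v4f32 above.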

define float @vreduce_nsz_fadd_v4f32(ptr %x, float %s) {
; CHECK-LABEL: vreduce_nsz_fadd_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vfmv.s.f v9, fa0
; CHECK-NEXT:    vfredusum.vs v8, v8, v9
; CHECK-NEXT:    vfmv.f.s fa0, v8
; CHECK-NEXT:    ret
  %v = load <4 x float>, ptr %x
  %red = call reassoc nsz float @llvm.vector.reduce.fadd.v4f32(float %s, <4 x float> %v)
  ret float %red
}