; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
; Unordered FP sum reduction: nxv4f16 dest/scalar, nxv1f16 vector source.
; Trailing iXLen operands are frm (0 = RNE, per fsrmi a1, 0) and vl.
declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv1f16(
  <vscale x 4 x half>,
  <vscale x 1 x half>,
  <vscale x 4 x half>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v9, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv1f16(
    <vscale x 4 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 4 x half> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 4 x half> %a
}
; Masked variant: adds an nxv1i1 mask operand before frm/vl.
declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.nxv1i1(
  <vscale x 4 x half>,
  <vscale x 1 x half>,
  <vscale x 4 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.nxv1i1(
    <vscale x 4 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4)

  ret <vscale x 4 x half> %a
}
; Unordered FP sum reduction: nxv4f16 dest/scalar, nxv2f16 vector source.
declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv2f16(
  <vscale x 4 x half>,
  <vscale x 2 x half>,
  <vscale x 4 x half>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v9, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv2f16(
    <vscale x 4 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 4 x half> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 4 x half> %a
}
; Masked variant: nxv2f16 source with nxv2i1 mask.
declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.nxv2i1(
  <vscale x 4 x half>,
  <vscale x 2 x half>,
  <vscale x 4 x half>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.nxv2i1(
    <vscale x 4 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4)

  ret <vscale x 4 x half> %a
}
; Unordered FP sum reduction: nxv4f16 dest/scalar, nxv4f16 vector source (LMUL=1).
declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v9, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 4 x half> %a
}
; Masked variant: nxv4f16 source with nxv4i1 mask.
declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.nxv4i1(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.nxv4i1(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4)

  ret <vscale x 4 x half> %a
}
; Unordered FP sum reduction: nxv4f16 dest/scalar, nxv8f16 vector source (LMUL=2).
declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv8f16(
  <vscale x 4 x half>,
  <vscale x 8 x half>,
  <vscale x 4 x half>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v10, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv8f16(
    <vscale x 4 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 4 x half> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 4 x half> %a
}
; Masked variant: nxv8f16 source with nxv8i1 mask.
declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.nxv8i1(
  <vscale x 4 x half>,
  <vscale x 8 x half>,
  <vscale x 4 x half>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v10, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.nxv8i1(
    <vscale x 4 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4)

  ret <vscale x 4 x half> %a
}
; Unordered FP sum reduction: nxv4f16 dest/scalar, nxv16f16 vector source (LMUL=4).
declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv16f16(
  <vscale x 4 x half>,
  <vscale x 16 x half>,
  <vscale x 4 x half>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v12, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv16f16(
    <vscale x 4 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 4 x half> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 4 x half> %a
}
; Masked variant: nxv16f16 source with nxv16i1 mask.
declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.nxv16i1(
  <vscale x 4 x half>,
  <vscale x 16 x half>,
  <vscale x 4 x half>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v12, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.nxv16i1(
    <vscale x 4 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4)

  ret <vscale x 4 x half> %a
}
; Unordered FP sum reduction: nxv4f16 dest/scalar, nxv32f16 vector source (LMUL=8).
declare <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv32f16(
  <vscale x 4 x half>,
  <vscale x 32 x half>,
  <vscale x 4 x half>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v16, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv32f16(
    <vscale x 4 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 4 x half> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 4 x half> %a
}
; Masked variant: nxv32f16 source with nxv32i1 mask.
declare <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.nxv32i1(
  <vscale x 4 x half>,
  <vscale x 32 x half>,
  <vscale x 4 x half>,
  <vscale x 32 x i1>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v16, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.nxv32i1(
    <vscale x 4 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 32 x i1> %3,
    iXLen 0, iXLen %4)

  ret <vscale x 4 x half> %a
}
; Unordered FP sum reduction: nxv2f32 dest/scalar, nxv1f32 vector source.
declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32(
  <vscale x 2 x float>,
  <vscale x 1 x float>,
  <vscale x 2 x float>,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v9, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32(
    <vscale x 2 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 2 x float> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 2 x float> %a
}
; Masked variant: nxv1f32 source with nxv1i1 mask.
declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.nxv1i1(
  <vscale x 2 x float>,
  <vscale x 1 x float>,
  <vscale x 2 x float>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.nxv1i1(
    <vscale x 2 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4)

  ret <vscale x 2 x float> %a
}
; Unordered FP sum reduction: nxv2f32 dest/scalar, nxv2f32 vector source (LMUL=1).
declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v9, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 2 x float> %a
}
; Masked variant: nxv2f32 source with nxv2i1 mask.
declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.nxv2i1(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.nxv2i1(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4)

  ret <vscale x 2 x float> %a
}
; Unordered FP sum reduction: nxv2f32 dest/scalar, nxv4f32 vector source (LMUL=2).
declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv4f32(
  <vscale x 2 x float>,
  <vscale x 4 x float>,
  <vscale x 2 x float>,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v10, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv4f32(
    <vscale x 2 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 2 x float> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 2 x float> %a
}
; Masked variant: nxv4f32 source with nxv4i1 mask.
declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.nxv4i1(
  <vscale x 2 x float>,
  <vscale x 4 x float>,
  <vscale x 2 x float>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v10, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.nxv4i1(
    <vscale x 2 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4)

  ret <vscale x 2 x float> %a
}
; Unordered FP sum reduction: nxv2f32 dest/scalar, nxv8f32 vector source (LMUL=4).
declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32(
  <vscale x 2 x float>,
  <vscale x 8 x float>,
  <vscale x 2 x float>,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v12, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32(
    <vscale x 2 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 2 x float> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 2 x float> %a
}
; Masked variant: nxv8f32 source with nxv8i1 mask.
declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.nxv8i1(
  <vscale x 2 x float>,
  <vscale x 8 x float>,
  <vscale x 2 x float>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v12, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.nxv8i1(
    <vscale x 2 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4)

  ret <vscale x 2 x float> %a
}
; Unordered FP sum reduction: nxv2f32 dest/scalar, nxv16f32 vector source (LMUL=8).
declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv16f32(
  <vscale x 2 x float>,
  <vscale x 16 x float>,
  <vscale x 2 x float>,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v16, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv16f32(
    <vscale x 2 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 2 x float> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 2 x float> %a
}
; Masked variant: nxv16f32 source with nxv16i1 mask.
declare <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.nxv16i1(
  <vscale x 2 x float>,
  <vscale x 16 x float>,
  <vscale x 2 x float>,
  <vscale x 16 x i1>,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v16, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.nxv16i1(
    <vscale x 2 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4)

  ret <vscale x 2 x float> %a
}
; Unordered FP sum reduction: nxv1f64 dest/scalar, nxv1f64 vector source (LMUL=1).
declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v9, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 1 x double> %a
}
; Masked variant: nxv1f64 source with nxv1i1 mask.
declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.nxv1i1(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.nxv1i1(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4)

  ret <vscale x 1 x double> %a
}
; Unordered FP sum reduction: nxv1f64 dest/scalar, nxv2f64 vector source (LMUL=2).
declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv2f64(
  <vscale x 1 x double>,
  <vscale x 2 x double>,
  <vscale x 1 x double>,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v10, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv2f64(
    <vscale x 1 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 1 x double> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 1 x double> %a
}
; Masked variant: nxv2f64 source with nxv2i1 mask.
declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.nxv2i1(
  <vscale x 1 x double>,
  <vscale x 2 x double>,
  <vscale x 1 x double>,
  <vscale x 2 x i1>,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v10, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.nxv2i1(
    <vscale x 1 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4)

  ret <vscale x 1 x double> %a
}
; Unordered FP sum reduction: nxv1f64 dest/scalar, nxv4f64 vector source (LMUL=4).
declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv4f64(
  <vscale x 1 x double>,
  <vscale x 4 x double>,
  <vscale x 1 x double>,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v12, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv4f64(
    <vscale x 1 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 1 x double> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 1 x double> %a
}
; Masked variant: nxv4f64 source with nxv4i1 mask.
declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.nxv4i1(
  <vscale x 1 x double>,
  <vscale x 4 x double>,
  <vscale x 1 x double>,
  <vscale x 4 x i1>,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v12, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.nxv4i1(
    <vscale x 1 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4)

  ret <vscale x 1 x double> %a
}
; Unordered FP sum reduction: nxv1f64 dest/scalar, nxv8f64 vector source (LMUL=8).
declare <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv8f64(
  <vscale x 1 x double>,
  <vscale x 8 x double>,
  <vscale x 1 x double>,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v16, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv8f64(
    <vscale x 1 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 1 x double> %2,
    iXLen 0, iXLen %3)

  ret <vscale x 1 x double> %a
}
; Masked variant: nxv8f64 source with nxv8i1 mask.
declare <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.nxv8i1(
  <vscale x 1 x double>,
  <vscale x 8 x double>,
  <vscale x 1 x double>,
  <vscale x 8 x i1>,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfredusum_mask_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT:    vfredusum.vs v8, v16, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.nxv8i1(
    <vscale x 1 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4)

  ret <vscale x 1 x double> %a
}