; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfhmin,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfhmin,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
; RUN: sed 's/iXLen/i32/g' %s | not --crash llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfhmin \
; RUN:   -target-abi=ilp32d 2>&1 | FileCheck %s --check-prefixes=ZVFMIN
; RUN: sed 's/iXLen/i64/g' %s | not --crash llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfhmin \
; RUN:   -target-abi=lp64d 2>&1 | FileCheck %s --check-prefixes=ZVFMIN

; ZVFMIN: LLVM ERROR: Cannot select: intrinsic %llvm.riscv.vfadd
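
; As exercised below, the unmasked intrinsics take
; (passthru, op1, op2, frm, vl) and the masked forms take
; (passthru, op1, op2, mask, frm, vl, policy). Every test passes a static
; rounding mode of 0 (rne), so codegen is expected to swap the dynamic frm
; with fsrmi and restore it with fsrm around each vfadd.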
declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}
declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}
declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}
declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}
declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}
declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v16
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 32 x half> %2,
    <vscale x 32 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}
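
; Note: at LMUL=8 the first two vector arguments occupy the v8 and v16
; register groups, so the third operand of the masked test above (and of the
; m8 masked tests below) no longer fits in registers and is passed
; indirectly, hence the vl8re16.v/vl8re32.v/vl8re64.v reload from (a0).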
declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v16
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x float> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}
declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v9
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v10
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.nxv2f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v10, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x double> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v12
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.nxv4f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v12, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x double> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfadd.vv v8, v8, v16
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.nxv8f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, <vscale x 8 x double> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f64_nxv8f64_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v16, v24, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.nxv8f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x double> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}
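
; The tests below exercise the vector-scalar form (vfadd.vf), where the
; scalar addend arrives in fa0; the FRM save/restore pattern is the same as
; in the vector-vector tests above.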
declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
    <vscale x 1 x half> undef,
    <vscale x 1 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x half> %a
}
declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
    <vscale x 2 x half> undef,
    <vscale x 2 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    half %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x half> %a
}
declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
    <vscale x 4 x half> undef,
    <vscale x 4 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x half> %a
}
declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x half> %a
}
declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x half> %a
}
declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    half %1,
    iXLen 0, iXLen %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 32 x half> @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    half %2,
    <vscale x 32 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 32 x half> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    float %1,
    iXLen 0, iXLen %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x float> @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    float %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 16 x float> %a
}
declare <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.nxv1f64.f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    double %1,
    iXLen 0, iXLen %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfadd.mask.nxv1f64.f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    double %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 1 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.nxv2f64.f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    double %1,
    iXLen 0, iXLen %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v10, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfadd.mask.nxv2f64.f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    double %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 2 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.nxv4f64.f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    double %1,
    iXLen 0, iXLen %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v12, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfadd.mask.nxv4f64.f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    double %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 4 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vfadd.vf v8, v8, fa0
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.nxv8f64.f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    double %1,
    iXLen 0, iXLen %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x double> @intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vfadd.vf v8, v16, fa0, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfadd.mask.nxv8f64.f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    double %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 1)

  ret <vscale x 8 x double> %a
}