; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
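
; The three trailing iXLen operands on each intrinsic are the static
; rounding mode (0 = rne, installed and restored around the instruction via
; fsrmi/fsrm), the AVL, and the tail/mask policy (0 = undisturbed, matching
; the "tu" in the generated vsetvli).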

declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x half> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v10, v12
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v10, v12, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x half> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v12, v16, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x half> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 16 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v10, v12
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v10, v12, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v12, v16, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 8 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v10, v12
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x double> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v10, v12, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x double> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x double> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vv v8, v12, v16, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x double> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 4 x double> %a
}
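
; The .vf variants below mirror the .vv tests above, but take the second
; operand as an FP scalar (passed in fa0) instead of a vector register.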

declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16(
  <vscale x 1 x half>,
  half,
  <vscale x 1 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16(
    <vscale x 1 x half> %0,
    half %1,
    <vscale x 1 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  half,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    half %1,
    <vscale x 1 x half> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16(
  <vscale x 2 x half>,
  half,
  <vscale x 2 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16(
    <vscale x 2 x half> %0,
    half %1,
    <vscale x 2 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16(
  <vscale x 2 x half>,
  half,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16(
    <vscale x 2 x half> %0,
    half %1,
    <vscale x 2 x half> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16(
  <vscale x 4 x half>,
  half,
  <vscale x 4 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16(
    <vscale x 4 x half> %0,
    half %1,
    <vscale x 4 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16(
  <vscale x 4 x half>,
  half,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16(
    <vscale x 4 x half> %0,
    half %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16(
  <vscale x 8 x half>,
  half,
  <vscale x 8 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16(
    <vscale x 8 x half> %0,
    half %1,
    <vscale x 8 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16(
  <vscale x 8 x half>,
  half,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16(
    <vscale x 8 x half> %0,
    half %1,
    <vscale x 8 x half> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16(
  <vscale x 16 x half>,
  half,
  <vscale x 16 x half>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16(
    <vscale x 16 x half> %0,
    half %1,
    <vscale x 16 x half> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  half,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    half %1,
    <vscale x 16 x half> %2,
    <vscale x 16 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 16 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
  <vscale x 1 x float>,
  float,
  <vscale x 1 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
    <vscale x 1 x float> %0,
    float %1,
    <vscale x 1 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  float,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    float %1,
    <vscale x 1 x float> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32(
  <vscale x 2 x float>,
  float,
  <vscale x 2 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32(
    <vscale x 2 x float> %0,
    float %1,
    <vscale x 2 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  float,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    float %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32(
  <vscale x 4 x float>,
  float,
  <vscale x 4 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32(
    <vscale x 4 x float> %0,
    float %1,
    <vscale x 4 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  float,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    float %1,
    <vscale x 4 x float> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32(
  <vscale x 8 x float>,
  float,
  <vscale x 8 x float>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32(
    <vscale x 8 x float> %0,
    float %1,
    <vscale x 8 x float> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  float,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    float %1,
    <vscale x 8 x float> %2,
    <vscale x 8 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 8 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64(
  <vscale x 1 x double>,
  double,
  <vscale x 1 x double>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64(
    <vscale x 1 x double> %0,
    double %1,
    <vscale x 1 x double> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64(
  <vscale x 1 x double>,
  double,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x double> @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64(
    <vscale x 1 x double> %0,
    double %1,
    <vscale x 1 x double> %2,
    <vscale x 1 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64(
  <vscale x 2 x double>,
  double,
  <vscale x 2 x double>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64(
    <vscale x 2 x double> %0,
    double %1,
    <vscale x 2 x double> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64(
  <vscale x 2 x double>,
  double,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x double> @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64(
    <vscale x 2 x double> %0,
    double %1,
    <vscale x 2 x double> %2,
    <vscale x 2 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64(
  <vscale x 4 x double>,
  double,
  <vscale x 4 x double>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64(
    <vscale x 4 x double> %0,
    double %1,
    <vscale x 4 x double> %2,
    iXLen 0, iXLen %3, iXLen 0)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64(
  <vscale x 4 x double>,
  double,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x double> @intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    fsrmi a0, 0
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12, v0.t
; CHECK-NEXT:    fsrm a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64(
    <vscale x 4 x double> %0,
    double %1,
    <vscale x 4 x double> %2,
    <vscale x 4 x i1> %3,
    iXLen 0, iXLen %4, iXLen 0);

  ret <vscale x 4 x double> %a
}