; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zvfh,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zvfh,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfhmin,+zvfhmin,+v -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfhmin,+zvfhmin,+v -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN

; This tests a mix of vfnmsac and vfnmsub by using different operand orders to
; trigger commuting in TwoAddressInstructionPass.
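
; For reference, the RVV instruction semantics (from the vector spec; not
; assertions generated by the script) are:
;   vfnmsac.vv vd, vs1, vs2  ; vd = -(vs1 * vs2) + vd   (vd is the addend)
;   vfnmsub.vv vd, vs1, vs2  ; vd = -(vd * vs1) + vs2   (vd is a multiplicand)
; Whether the backend emits vfnmsac or vfnmsub therefore depends on which fma
; operand ends up tied to the destination register after commuting.
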
declare <vscale x 1 x half> @llvm.experimental.constrained.fma.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, metadata, metadata)

define <vscale x 1 x half> @vfnmsub_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x half> %vc) strictfp {
; ZVFH-LABEL: vfnmsub_vv_nxv1f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; ZVFH-NEXT:    vfnmsub.vv v8, v9, v10
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vv_nxv1f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v10
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v12, v10, v11
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT:    ret
  %neg = fneg <vscale x 1 x half> %va
  %vd = call <vscale x 1 x half> @llvm.experimental.constrained.fma.nxv1f16(<vscale x 1 x half> %neg, <vscale x 1 x half> %vb, <vscale x 1 x half> %vc, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %vd
}

define <vscale x 1 x half> @vfnmsub_vf_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, half %c) strictfp {
; ZVFH-LABEL: vfnmsub_vf_nxv1f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
; ZVFH-NEXT:    vfnmsub.vf v8, fa0, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vf_nxv1f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vmv.v.x v10, a0
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v9
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v12, v9, v11
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 1 x half> poison, half %c, i32 0
  %splat = shufflevector <vscale x 1 x half> %head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
  %neg = fneg <vscale x 1 x half> %va
  %vd = call <vscale x 1 x half> @llvm.experimental.constrained.fma.nxv1f16(<vscale x 1 x half> %neg, <vscale x 1 x half> %splat, <vscale x 1 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x half> %vd
}

declare <vscale x 2 x half> @llvm.experimental.constrained.fma.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, metadata, metadata)

define <vscale x 2 x half> @vfnmsub_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x half> %vc) strictfp {
; ZVFH-LABEL: vfnmsub_vv_nxv2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; ZVFH-NEXT:    vfnmsub.vv v8, v10, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vv_nxv2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v9
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v12, v9, v11
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT:    ret
  %neg = fneg <vscale x 2 x half> %va
  %vd = call <vscale x 2 x half> @llvm.experimental.constrained.fma.nxv2f16(<vscale x 2 x half> %neg, <vscale x 2 x half> %vc, <vscale x 2 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %vd
}

define <vscale x 2 x half> @vfnmsub_vf_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, half %c) strictfp {
; ZVFH-LABEL: vfnmsub_vf_nxv2f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; ZVFH-NEXT:    vfnmsub.vf v8, fa0, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vf_nxv2f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vmv.v.x v10, a0
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v9
; ZVFHMIN-NEXT:    vxor.vx v8, v8, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v9, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v12, v9, v11
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v12
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 2 x half> poison, half %c, i32 0
  %splat = shufflevector <vscale x 2 x half> %head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
  %neg = fneg <vscale x 2 x half> %va
  %vd = call <vscale x 2 x half> @llvm.experimental.constrained.fma.nxv2f16(<vscale x 2 x half> %splat, <vscale x 2 x half> %neg, <vscale x 2 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x half> %vd
}

declare <vscale x 4 x half> @llvm.experimental.constrained.fma.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, metadata, metadata)

define <vscale x 4 x half> @vfnmsub_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x half> %vc) strictfp {
; ZVFH-LABEL: vfnmsub_vv_nxv4f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; ZVFH-NEXT:    vfnmsub.vv v8, v9, v10
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vv_nxv4f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v10
; ZVFHMIN-NEXT:    vxor.vx v9, v9, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v14, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v14, v10, v12
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v14
; ZVFHMIN-NEXT:    ret
  %neg = fneg <vscale x 4 x half> %vb
  %vd = call <vscale x 4 x half> @llvm.experimental.constrained.fma.nxv4f16(<vscale x 4 x half> %neg, <vscale x 4 x half> %va, <vscale x 4 x half> %vc, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %vd
}

define <vscale x 4 x half> @vfnmsub_vf_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, half %c) strictfp {
; ZVFH-LABEL: vfnmsub_vf_nxv4f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; ZVFH-NEXT:    vfnmsub.vf v8, fa0, v9
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vf_nxv4f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vmv.v.x v10, a0
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
; ZVFHMIN-NEXT:    vxor.vx v9, v10, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v10, v9
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v14, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v14, v10, v12
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v14
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 4 x half> poison, half %c, i32 0
  %splat = shufflevector <vscale x 4 x half> %head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
  %neg = fneg <vscale x 4 x half> %splat
  %vd = call <vscale x 4 x half> @llvm.experimental.constrained.fma.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %neg, <vscale x 4 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x half> %vd
}

declare <vscale x 8 x half> @llvm.experimental.constrained.fma.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, metadata, metadata)

define <vscale x 8 x half> @vfnmsub_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x half> %vc) strictfp {
; ZVFH-LABEL: vfnmsub_vv_nxv8f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVFH-NEXT:    vfnmsac.vv v8, v12, v10
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vv_nxv8f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT:    vxor.vx v8, v10, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v20, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v24, v20, v16
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT:    ret
  %neg = fneg <vscale x 8 x half> %vb
  %vd = call <vscale x 8 x half> @llvm.experimental.constrained.fma.nxv8f16(<vscale x 8 x half> %neg, <vscale x 8 x half> %vc, <vscale x 8 x half> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %vd
}

define <vscale x 8 x half> @vfnmsub_vf_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, half %c) strictfp {
; ZVFH-LABEL: vfnmsub_vf_nxv8f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVFH-NEXT:    vfnmsac.vf v8, fa0, v10
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vf_nxv8f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vmv.v.x v12, a0
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT:    vxor.vx v8, v12, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v20, v10
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v20, v12, v16
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v20
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 8 x half> poison, half %c, i32 0
  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
  %neg = fneg <vscale x 8 x half> %splat
  %vd = call <vscale x 8 x half> @llvm.experimental.constrained.fma.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x half> %neg, <vscale x 8 x half> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x half> %vd
}

declare <vscale x 16 x half> @llvm.experimental.constrained.fma.nxv16f16(<vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x half>, metadata, metadata)

define <vscale x 16 x half> @vfnmsub_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x half> %vc) strictfp {
; ZVFH-LABEL: vfnmsub_vv_nxv16f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFH-NEXT:    vfnmsub.vv v8, v16, v12
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vv_nxv16f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT:    vxor.vx v12, v16, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v0, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v0, v16, v24
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v0
; ZVFHMIN-NEXT:    ret
  %neg = fneg <vscale x 16 x half> %vc
  %vd = call <vscale x 16 x half> @llvm.experimental.constrained.fma.nxv16f16(<vscale x 16 x half> %neg, <vscale x 16 x half> %va, <vscale x 16 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x half> %vd
}

define <vscale x 16 x half> @vfnmsub_vf_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, half %c) strictfp {
; ZVFH-LABEL: vfnmsub_vf_nxv16f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFH-NEXT:    vfnmsub.vf v8, fa0, v12
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vf_nxv16f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vmv.v.x v16, a0
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT:    vxor.vx v12, v16, a0
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v0, v8
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v0, v16, v24
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v0
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 16 x half> poison, half %c, i32 0
  %splat = shufflevector <vscale x 16 x half> %head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
  %neg = fneg <vscale x 16 x half> %splat
  %vd = call <vscale x 16 x half> @llvm.experimental.constrained.fma.nxv16f16(<vscale x 16 x half> %neg, <vscale x 16 x half> %va, <vscale x 16 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x half> %vd
}

declare <vscale x 32 x half> @llvm.experimental.constrained.fma.nxv32f16(<vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x half>, metadata, metadata)

define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x half> %vc) strictfp {
; ZVFH-LABEL: vfnmsub_vv_nxv32f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vl8re16.v v24, (a0)
; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; ZVFH-NEXT:    vfnmsub.vv v8, v24, v16
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vv_nxv32f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    addi sp, sp, -16
; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT:    csrr a1, vlenb
; ZVFHMIN-NEXT:    li a2, 24
; ZVFHMIN-NEXT:    mul a1, a1, a2
; ZVFHMIN-NEXT:    sub sp, sp, a1
; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vmv8r.v v0, v16
; ZVFHMIN-NEXT:    csrr a1, vlenb
; ZVFHMIN-NEXT:    slli a1, a1, 4
; ZVFHMIN-NEXT:    add a1, sp, a1
; ZVFHMIN-NEXT:    addi a1, a1, 16
; ZVFHMIN-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT:    csrr a1, vlenb
; ZVFHMIN-NEXT:    slli a1, a1, 3
; ZVFHMIN-NEXT:    add a1, sp, a1
; ZVFHMIN-NEXT:    addi a1, a1, 16
; ZVFHMIN-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT:    vl8re16.v v24, (a0)
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v0, v24, a0
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v0
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v24, v8, v16
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    slli a0, a0, 4
; ZVFHMIN-NEXT:    add a0, sp, a0
; ZVFHMIN-NEXT:    addi a0, a0, 16
; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT:    addi a0, sp, 16
; ZVFHMIN-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    slli a0, a0, 3
; ZVFHMIN-NEXT:    add a0, sp, a0
; ZVFHMIN-NEXT:    addi a0, a0, 16
; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v4
; ZVFHMIN-NEXT:    addi a0, sp, 16
; ZVFHMIN-NEXT:    vl8r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v16, v8, v0
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    li a1, 24
; ZVFHMIN-NEXT:    mul a0, a0, a1
; ZVFHMIN-NEXT:    add sp, sp, a0
; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT:    addi sp, sp, 16
; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
; ZVFHMIN-NEXT:    ret
  %neg = fneg <vscale x 32 x half> %vc
  %vd = call <vscale x 32 x half> @llvm.experimental.constrained.fma.nxv32f16(<vscale x 32 x half> %neg, <vscale x 32 x half> %va, <vscale x 32 x half> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 32 x half> %vd
}

define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, half %c) strictfp {
; ZVFH-LABEL: vfnmsub_vf_nxv32f16:
; ZVFH:       # %bb.0:
; ZVFH-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; ZVFH-NEXT:    vfnmsac.vf v8, fa0, v16
; ZVFH-NEXT:    ret
;
; ZVFHMIN-LABEL: vfnmsub_vf_nxv32f16:
; ZVFHMIN:       # %bb.0:
; ZVFHMIN-NEXT:    addi sp, sp, -16
; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    li a1, 24
; ZVFHMIN-NEXT:    mul a0, a0, a1
; ZVFHMIN-NEXT:    sub sp, sp, a0
; ZVFHMIN-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    slli a0, a0, 3
; ZVFHMIN-NEXT:    add a0, sp, a0
; ZVFHMIN-NEXT:    addi a0, a0, 16
; ZVFHMIN-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
; ZVFHMIN-NEXT:    vmv8r.v v0, v8
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    slli a0, a0, 4
; ZVFHMIN-NEXT:    add a0, sp, a0
; ZVFHMIN-NEXT:    addi a0, a0, 16
; ZVFHMIN-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT:    fmv.x.h a0, fa0
; ZVFHMIN-NEXT:    vmv.v.x v24, a0
; ZVFHMIN-NEXT:    lui a0, 8
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v0
; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m8, ta, ma
; ZVFHMIN-NEXT:    vxor.vx v0, v24, a0
; ZVFHMIN-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v16
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v0
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v24, v16, v8
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    slli a0, a0, 4
; ZVFHMIN-NEXT:    add a0, sp, a0
; ZVFHMIN-NEXT:    addi a0, a0, 16
; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT:    addi a0, sp, 16
; ZVFHMIN-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    slli a0, a0, 3
; ZVFHMIN-NEXT:    add a0, sp, a0
; ZVFHMIN-NEXT:    addi a0, a0, 16
; ZVFHMIN-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT:    vfwcvt.f.f.v v8, v4
; ZVFHMIN-NEXT:    addi a0, sp, 16
; ZVFHMIN-NEXT:    vl8r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT:    vfmadd.vv v16, v8, v0
; ZVFHMIN-NEXT:    vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT:    vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT:    vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT:    csrr a0, vlenb
; ZVFHMIN-NEXT:    li a1, 24
; ZVFHMIN-NEXT:    mul a0, a0, a1
; ZVFHMIN-NEXT:    add sp, sp, a0
; ZVFHMIN-NEXT:    .cfi_def_cfa sp, 16
; ZVFHMIN-NEXT:    addi sp, sp, 16
; ZVFHMIN-NEXT:    .cfi_def_cfa_offset 0
; ZVFHMIN-NEXT:    ret
  %head = insertelement <vscale x 32 x half> poison, half %c, i32 0
  %splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
  %neg = fneg <vscale x 32 x half> %splat
  %vd = call <vscale x 32 x half> @llvm.experimental.constrained.fma.nxv32f16(<vscale x 32 x half> %neg, <vscale x 32 x half> %vb, <vscale x 32 x half> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 32 x half> %vd
}

declare <vscale x 1 x float> @llvm.experimental.constrained.fma.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, metadata, metadata)

define <vscale x 1 x float> @vfnmsub_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x float> %vc) strictfp {
; CHECK-LABEL: vfnmsub_vv_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfnmsub.vv v8, v9, v10
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 1 x float> %vb
  %vd = call <vscale x 1 x float> @llvm.experimental.constrained.fma.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %neg, <vscale x 1 x float> %vc, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %vd
}

define <vscale x 1 x float> @vfnmsub_vf_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, float %c) strictfp {
; CHECK-LABEL: vfnmsub_vf_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x float> poison, float %c, i32 0
  %splat = shufflevector <vscale x 1 x float> %head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
  %neg = fneg <vscale x 1 x float> %va
  %vd = call <vscale x 1 x float> @llvm.experimental.constrained.fma.nxv1f32(<vscale x 1 x float> %neg, <vscale x 1 x float> %splat, <vscale x 1 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x float> %vd
}

declare <vscale x 2 x float> @llvm.experimental.constrained.fma.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, metadata, metadata)

define <vscale x 2 x float> @vfnmsub_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x float> %vc) strictfp {
; CHECK-LABEL: vfnmsub_vv_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfnmsub.vv v8, v10, v9
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 2 x float> %vc
  %vd = call <vscale x 2 x float> @llvm.experimental.constrained.fma.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %neg, <vscale x 2 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %vd
}

define <vscale x 2 x float> @vfnmsub_vf_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, float %c) strictfp {
; CHECK-LABEL: vfnmsub_vf_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x float> poison, float %c, i32 0
  %splat = shufflevector <vscale x 2 x float> %head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
  %neg = fneg <vscale x 2 x float> %va
  %vd = call <vscale x 2 x float> @llvm.experimental.constrained.fma.nxv2f32(<vscale x 2 x float> %splat, <vscale x 2 x float> %neg, <vscale x 2 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x float> %vd
}

declare <vscale x 4 x float> @llvm.experimental.constrained.fma.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, metadata, metadata)

define <vscale x 4 x float> @vfnmsub_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x float> %vc) strictfp {
; CHECK-LABEL: vfnmsub_vv_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfnmsub.vv v8, v10, v12
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 4 x float> %va
  %vd = call <vscale x 4 x float> @llvm.experimental.constrained.fma.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x float> %neg, <vscale x 4 x float> %vc, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %vd
}

define <vscale x 4 x float> @vfnmsub_vf_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, float %c) strictfp {
; CHECK-LABEL: vfnmsub_vf_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x float> poison, float %c, i32 0
  %splat = shufflevector <vscale x 4 x float> %head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
  %neg = fneg <vscale x 4 x float> %splat
  %vd = call <vscale x 4 x float> @llvm.experimental.constrained.fma.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %neg, <vscale x 4 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x float> %vd
}

declare <vscale x 8 x float> @llvm.experimental.constrained.fma.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>, metadata, metadata)

define <vscale x 8 x float> @vfnmsub_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x float> %vc) strictfp {
; CHECK-LABEL: vfnmsub_vv_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfnmsac.vv v8, v16, v12
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 8 x float> %vc
  %vd = call <vscale x 8 x float> @llvm.experimental.constrained.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %neg, <vscale x 8 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %vd
}

define <vscale x 8 x float> @vfnmsub_vf_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, float %c) strictfp {
; CHECK-LABEL: vfnmsub_vf_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfnmsac.vf v8, fa0, v12
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x float> poison, float %c, i32 0
  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
  %neg = fneg <vscale x 8 x float> %splat
  %vd = call <vscale x 8 x float> @llvm.experimental.constrained.fma.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x float> %neg, <vscale x 8 x float> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x float> %vd
}

declare <vscale x 16 x float> @llvm.experimental.constrained.fma.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, metadata, metadata)

define <vscale x 16 x float> @vfnmsub_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x float> %vc) strictfp {
; CHECK-LABEL: vfnmsub_vv_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vfnmsub.vv v8, v24, v16
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 16 x float> %va
  %vd = call <vscale x 16 x float> @llvm.experimental.constrained.fma.nxv16f32(<vscale x 16 x float> %vc, <vscale x 16 x float> %neg, <vscale x 16 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x float> %vd
}

define <vscale x 16 x float> @vfnmsub_vf_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, float %c) strictfp {
; CHECK-LABEL: vfnmsub_vf_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v16
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x float> poison, float %c, i32 0
  %splat = shufflevector <vscale x 16 x float> %head, <vscale x 16 x float> poison, <vscale x 16 x i32> zeroinitializer
  %neg = fneg <vscale x 16 x float> %splat
  %vd = call <vscale x 16 x float> @llvm.experimental.constrained.fma.nxv16f32(<vscale x 16 x float> %neg, <vscale x 16 x float> %va, <vscale x 16 x float> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 16 x float> %vd
}

declare <vscale x 1 x double> @llvm.experimental.constrained.fma.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, metadata, metadata)

define <vscale x 1 x double> @vfnmsub_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x double> %vc) strictfp {
; CHECK-LABEL: vfnmsub_vv_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfnmsac.vv v8, v10, v9
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 1 x double> %vb
  %vd = call <vscale x 1 x double> @llvm.experimental.constrained.fma.nxv1f64(<vscale x 1 x double> %vc, <vscale x 1 x double> %neg, <vscale x 1 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %vd
}

define <vscale x 1 x double> @vfnmsub_vf_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, double %c) strictfp {
; CHECK-LABEL: vfnmsub_vf_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x double> poison, double %c, i32 0
  %splat = shufflevector <vscale x 1 x double> %head, <vscale x 1 x double> poison, <vscale x 1 x i32> zeroinitializer
  %neg = fneg <vscale x 1 x double> %va
  %vd = call <vscale x 1 x double> @llvm.experimental.constrained.fma.nxv1f64(<vscale x 1 x double> %neg, <vscale x 1 x double> %splat, <vscale x 1 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 1 x double> %vd
}

declare <vscale x 2 x double> @llvm.experimental.constrained.fma.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, metadata, metadata)

define <vscale x 2 x double> @vfnmsub_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x double> %vc) strictfp {
; CHECK-LABEL: vfnmsub_vv_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vfnmsub.vv v8, v12, v10
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 2 x double> %va
  %vd = call <vscale x 2 x double> @llvm.experimental.constrained.fma.nxv2f64(<vscale x 2 x double> %neg, <vscale x 2 x double> %vc, <vscale x 2 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %vd
}

define <vscale x 2 x double> @vfnmsub_vf_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, double %c) strictfp {
; CHECK-LABEL: vfnmsub_vf_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v10
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x double> poison, double %c, i32 0
  %splat = shufflevector <vscale x 2 x double> %head, <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
  %neg = fneg <vscale x 2 x double> %va
  %vd = call <vscale x 2 x double> @llvm.experimental.constrained.fma.nxv2f64(<vscale x 2 x double> %splat, <vscale x 2 x double> %neg, <vscale x 2 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 2 x double> %vd
}

declare <vscale x 4 x double> @llvm.experimental.constrained.fma.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x double>, metadata, metadata)

define <vscale x 4 x double> @vfnmsub_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x double> %vc) strictfp {
; CHECK-LABEL: vfnmsub_vv_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vfnmsub.vv v8, v12, v16
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 4 x double> %vb
  %vd = call <vscale x 4 x double> @llvm.experimental.constrained.fma.nxv4f64(<vscale x 4 x double> %neg, <vscale x 4 x double> %va, <vscale x 4 x double> %vc, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %vd
}

define <vscale x 4 x double> @vfnmsub_vf_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, double %c) strictfp {
; CHECK-LABEL: vfnmsub_vf_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; CHECK-NEXT:    vfnmsub.vf v8, fa0, v12
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x double> poison, double %c, i32 0
  %splat = shufflevector <vscale x 4 x double> %head, <vscale x 4 x double> poison, <vscale x 4 x i32> zeroinitializer
  %neg = fneg <vscale x 4 x double> %splat
  %vd = call <vscale x 4 x double> @llvm.experimental.constrained.fma.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %neg, <vscale x 4 x double> %vb, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 4 x double> %vd
}

declare <vscale x 8 x double> @llvm.experimental.constrained.fma.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x double>, metadata, metadata)

define <vscale x 8 x double> @vfnmsub_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x double> %vc) strictfp {
; CHECK-LABEL: vfnmsub_vv_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vfnmsac.vv v8, v16, v24
; CHECK-NEXT:    ret
  %neg = fneg <vscale x 8 x double> %vb
  %vd = call <vscale x 8 x double> @llvm.experimental.constrained.fma.nxv8f64(<vscale x 8 x double> %neg, <vscale x 8 x double> %vc, <vscale x 8 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %vd
}

define <vscale x 8 x double> @vfnmsub_vf_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, double %c) strictfp {
; CHECK-LABEL: vfnmsub_vf_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vfnmsac.vf v8, fa0, v16
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x double> poison, double %c, i32 0
  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
  %neg = fneg <vscale x 8 x double> %splat
  %vd = call <vscale x 8 x double> @llvm.experimental.constrained.fma.nxv8f64(<vscale x 8 x double> %vb, <vscale x 8 x double> %neg, <vscale x 8 x double> %va, metadata !"round.dynamic", metadata !"fpexcept.strict")
  ret <vscale x 8 x double> %vd
}