1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=ilp32d \
3 ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
4 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=lp64d \
5 ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFH
6 ; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=ilp32d \
7 ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
8 ; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfhmin,+v,+m -target-abi=lp64d \
9 ; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,ZVFHMIN
11 declare <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x i1>, i32)
12 declare <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float>, <vscale x 1 x i1>, i32)
13 declare <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half>, <vscale x 1 x i1>, i32)
15 define <vscale x 1 x float> @vfnmsac_vv_nxv1f32(<vscale x 1 x half> %a, <vscale x 1 x half> %b, <vscale x 1 x float> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
16 ; ZVFH-LABEL: vfnmsac_vv_nxv1f32:
18 ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
19 ; ZVFH-NEXT: vfwnmsac.vv v10, v8, v9, v0.t
20 ; ZVFH-NEXT: vmv1r.v v8, v10
23 ; ZVFHMIN-LABEL: vfnmsac_vv_nxv1f32:
25 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
26 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8, v0.t
27 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9, v0.t
28 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
29 ; ZVFHMIN-NEXT: vfnmsub.vv v8, v11, v10, v0.t
31 %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %m, i32 %evl)
32 %bext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
33 %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x i1> %m, i32 %evl)
34 %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %bext, <vscale x 1 x float> %c, <vscale x 1 x i1> %m, i32 %evl)
35 ret <vscale x 1 x float> %v
; Unmasked variant of the nxv1f32 VV case: the vp mask is an all-true splat,
; so the checks expect the same fold but without the v0.t mask operands.
38 define <vscale x 1 x float> @vfnmsac_vv_nxv1f32_unmasked(<vscale x 1 x half> %a, <vscale x 1 x half> %b, <vscale x 1 x float> %c, i32 zeroext %evl) {
39 ; ZVFH-LABEL: vfnmsac_vv_nxv1f32_unmasked:
41 ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
42 ; ZVFH-NEXT: vfwnmsac.vv v10, v8, v9
43 ; ZVFH-NEXT: vmv1r.v v8, v10
46 ; ZVFHMIN-LABEL: vfnmsac_vv_nxv1f32_unmasked:
48 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
49 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
50 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
51 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
52 ; ZVFHMIN-NEXT: vfnmsub.vv v8, v11, v10
54 %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
55 %bext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
56 %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
57 %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %bext, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
58 ret <vscale x 1 x float> %v
; Vector-scalar (VF) case, nxv1f32: half %b is splatted then fpext'ed, and the
; negated extended %a feeds the fma. +zvfh folds to vfwnmsac.vf with fa0;
; +zvfhmin first materializes the splat (fcvt.s.h/vfmv.v.f/vfncvt) and widens.
61 define <vscale x 1 x float> @vfnmsac_vf_nxv1f32(<vscale x 1 x half> %a, half %b, <vscale x 1 x float> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
62 ; ZVFH-LABEL: vfnmsac_vf_nxv1f32:
64 ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
65 ; ZVFH-NEXT: vfwnmsac.vf v9, fa0, v8, v0.t
66 ; ZVFH-NEXT: vmv1r.v v8, v9
69 ; ZVFHMIN-LABEL: vfnmsac_vf_nxv1f32:
71 ; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
72 ; ZVFHMIN-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
73 ; ZVFHMIN-NEXT: vfmv.v.f v10, fa5
74 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
75 ; ZVFHMIN-NEXT: vfncvt.f.f.w v11, v10
76 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
77 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8, v0.t
78 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v11, v0.t
79 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
80 ; ZVFHMIN-NEXT: vfnmsub.vv v8, v10, v9, v0.t
82 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
83 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
84 %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %m, i32 %evl)
85 %vbext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
86 %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x i1> %m, i32 %evl)
87 %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %vbext, <vscale x 1 x float> %c, <vscale x 1 x i1> %m, i32 %evl)
88 ret <vscale x 1 x float> %v
; Commuted VF case, nxv1f32: identical to vfnmsac_vf_nxv1f32 except the fma
; multiplicands are swapped (%vbext * %nega); the fold must still trigger.
91 define <vscale x 1 x float> @vfnmsac_vf_nxv1f32_commute(<vscale x 1 x half> %a, half %b, <vscale x 1 x float> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
92 ; ZVFH-LABEL: vfnmsac_vf_nxv1f32_commute:
94 ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
95 ; ZVFH-NEXT: vfwnmsac.vf v9, fa0, v8, v0.t
96 ; ZVFH-NEXT: vmv1r.v v8, v9
99 ; ZVFHMIN-LABEL: vfnmsac_vf_nxv1f32_commute:
101 ; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
102 ; ZVFHMIN-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
103 ; ZVFHMIN-NEXT: vfmv.v.f v10, fa5
104 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
105 ; ZVFHMIN-NEXT: vfncvt.f.f.w v11, v10
106 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
107 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8, v0.t
108 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v11, v0.t
109 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
110 ; ZVFHMIN-NEXT: vfnmsub.vv v10, v8, v9, v0.t
111 ; ZVFHMIN-NEXT: vmv1r.v v8, v10
113 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
114 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
115 %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> %m, i32 %evl)
116 %vbext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
117 %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x i1> %m, i32 %evl)
118 %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %vbext, <vscale x 1 x float> %nega, <vscale x 1 x float> %c, <vscale x 1 x i1> %m, i32 %evl)
119 ret <vscale x 1 x float> %v
; Unmasked VF case, nxv1f32: all-true splat mask, so no v0.t operands are
; expected in the generated code.
122 define <vscale x 1 x float> @vfnmsac_vf_nxv1f32_unmasked(<vscale x 1 x half> %a, half %b, <vscale x 1 x float> %c, i32 zeroext %evl) {
123 ; ZVFH-LABEL: vfnmsac_vf_nxv1f32_unmasked:
125 ; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
126 ; ZVFH-NEXT: vfwnmsac.vf v9, fa0, v8
127 ; ZVFH-NEXT: vmv1r.v v8, v9
130 ; ZVFHMIN-LABEL: vfnmsac_vf_nxv1f32_unmasked:
132 ; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
133 ; ZVFHMIN-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
134 ; ZVFHMIN-NEXT: vfmv.v.f v10, fa5
135 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
136 ; ZVFHMIN-NEXT: vfncvt.f.f.w v11, v10
137 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
138 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
139 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v11
140 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
141 ; ZVFHMIN-NEXT: vfnmsub.vv v8, v10, v9
143 %elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
144 %vb = shufflevector <vscale x 1 x half> %elt.head, <vscale x 1 x half> poison, <vscale x 1 x i32> zeroinitializer
145 %aext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
146 %vbext = call <vscale x 1 x float> @llvm.vp.fpext.nxv1f32.nxv1f16(<vscale x 1 x half> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
147 %nega = call <vscale x 1 x float> @llvm.vp.fneg.nxv1f32(<vscale x 1 x float> %aext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
148 %v = call <vscale x 1 x float> @llvm.vp.fma.nxv1f32(<vscale x 1 x float> %nega, <vscale x 1 x float> %vbext, <vscale x 1 x float> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
149 ret <vscale x 1 x float> %v
152 declare <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x i1>, i32)
153 declare <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
154 declare <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half>, <vscale x 2 x i1>, i32)
; Masked VV case at the next element count (nxv2f32): same fold as nxv1f32 but
; the vsetvli checks move from mf4/mf2 to mf2/m1.
156 define <vscale x 2 x float> @vfnmsac_vv_nxv2f32(<vscale x 2 x half> %a, <vscale x 2 x half> %b, <vscale x 2 x float> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
157 ; ZVFH-LABEL: vfnmsac_vv_nxv2f32:
159 ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
160 ; ZVFH-NEXT: vfwnmsac.vv v10, v8, v9, v0.t
161 ; ZVFH-NEXT: vmv1r.v v8, v10
164 ; ZVFHMIN-LABEL: vfnmsac_vv_nxv2f32:
166 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
167 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8, v0.t
168 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9, v0.t
169 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
170 ; ZVFHMIN-NEXT: vfnmsub.vv v8, v11, v10, v0.t
172 %aext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %m, i32 %evl)
173 %bext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
174 %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %aext, <vscale x 2 x i1> %m, i32 %evl)
175 %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %bext, <vscale x 2 x float> %c, <vscale x 2 x i1> %m, i32 %evl)
176 ret <vscale x 2 x float> %v
; Unmasked nxv2f32 VV case (all-true splat mask); same code shape as the
; masked version minus the v0.t operands.
179 define <vscale x 2 x float> @vfnmsac_vv_nxv2f32_unmasked(<vscale x 2 x half> %a, <vscale x 2 x half> %b, <vscale x 2 x float> %c, i32 zeroext %evl) {
180 ; ZVFH-LABEL: vfnmsac_vv_nxv2f32_unmasked:
182 ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
183 ; ZVFH-NEXT: vfwnmsac.vv v10, v8, v9
184 ; ZVFH-NEXT: vmv1r.v v8, v10
187 ; ZVFHMIN-LABEL: vfnmsac_vv_nxv2f32_unmasked:
189 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
190 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v11, v8
191 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v9
192 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
193 ; ZVFHMIN-NEXT: vfnmsub.vv v8, v11, v10
195 %aext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
196 %bext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
197 %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %aext, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
198 %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %bext, <vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
199 ret <vscale x 2 x float> %v
; Masked VF case, nxv2f32: scalar half %b splatted and extended; expects
; vfwnmsac.vf with +zvfh, splat-materialization plus vfnmsub.vv with +zvfhmin.
202 define <vscale x 2 x float> @vfnmsac_vf_nxv2f32(<vscale x 2 x half> %a, half %b, <vscale x 2 x float> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
203 ; ZVFH-LABEL: vfnmsac_vf_nxv2f32:
205 ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
206 ; ZVFH-NEXT: vfwnmsac.vf v9, fa0, v8, v0.t
207 ; ZVFH-NEXT: vmv1r.v v8, v9
210 ; ZVFHMIN-LABEL: vfnmsac_vf_nxv2f32:
212 ; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
213 ; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m1, ta, ma
214 ; ZVFHMIN-NEXT: vfmv.v.f v10, fa5
215 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
216 ; ZVFHMIN-NEXT: vfncvt.f.f.w v11, v10
217 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
218 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8, v0.t
219 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v11, v0.t
220 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
221 ; ZVFHMIN-NEXT: vfnmsub.vv v8, v10, v9, v0.t
223 %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
224 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
225 %aext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %m, i32 %evl)
226 %vbext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
227 %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %aext, <vscale x 2 x i1> %m, i32 %evl)
228 %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %vbext, <vscale x 2 x float> %c, <vscale x 2 x i1> %m, i32 %evl)
229 ret <vscale x 2 x float> %v
; Commuted VF case, nxv2f32: fma multiplicands swapped (%vbext * %nega);
; the same vfwnmsac.vf fold must still apply with +zvfh.
232 define <vscale x 2 x float> @vfnmsac_vf_nxv2f32_commute(<vscale x 2 x half> %a, half %b, <vscale x 2 x float> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
233 ; ZVFH-LABEL: vfnmsac_vf_nxv2f32_commute:
235 ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
236 ; ZVFH-NEXT: vfwnmsac.vf v9, fa0, v8, v0.t
237 ; ZVFH-NEXT: vmv1r.v v8, v9
240 ; ZVFHMIN-LABEL: vfnmsac_vf_nxv2f32_commute:
242 ; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
243 ; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m1, ta, ma
244 ; ZVFHMIN-NEXT: vfmv.v.f v10, fa5
245 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
246 ; ZVFHMIN-NEXT: vfncvt.f.f.w v11, v10
247 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
248 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8, v0.t
249 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v11, v0.t
250 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
251 ; ZVFHMIN-NEXT: vfnmsub.vv v10, v8, v9, v0.t
252 ; ZVFHMIN-NEXT: vmv.v.v v8, v10
254 %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
255 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
256 %aext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> %m, i32 %evl)
257 %vbext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
258 %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %aext, <vscale x 2 x i1> %m, i32 %evl)
259 %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %vbext, <vscale x 2 x float> %nega, <vscale x 2 x float> %c, <vscale x 2 x i1> %m, i32 %evl)
260 ret <vscale x 2 x float> %v
; Unmasked VF case, nxv2f32 (all-true splat mask): no v0.t operands expected.
263 define <vscale x 2 x float> @vfnmsac_vf_nxv2f32_unmasked(<vscale x 2 x half> %a, half %b, <vscale x 2 x float> %c, i32 zeroext %evl) {
264 ; ZVFH-LABEL: vfnmsac_vf_nxv2f32_unmasked:
266 ; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
267 ; ZVFH-NEXT: vfwnmsac.vf v9, fa0, v8
268 ; ZVFH-NEXT: vmv1r.v v8, v9
271 ; ZVFHMIN-LABEL: vfnmsac_vf_nxv2f32_unmasked:
273 ; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
274 ; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m1, ta, ma
275 ; ZVFHMIN-NEXT: vfmv.v.f v10, fa5
276 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
277 ; ZVFHMIN-NEXT: vfncvt.f.f.w v11, v10
278 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
279 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v10, v8
280 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v11
281 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m1, ta, ma
282 ; ZVFHMIN-NEXT: vfnmsub.vv v8, v10, v9
284 %elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
285 %vb = shufflevector <vscale x 2 x half> %elt.head, <vscale x 2 x half> poison, <vscale x 2 x i32> zeroinitializer
286 %aext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
287 %vbext = call <vscale x 2 x float> @llvm.vp.fpext.nxv2f32.nxv2f16(<vscale x 2 x half> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
288 %nega = call <vscale x 2 x float> @llvm.vp.fneg.nxv2f32(<vscale x 2 x float> %aext, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
289 %v = call <vscale x 2 x float> @llvm.vp.fma.nxv2f32(<vscale x 2 x float> %nega, <vscale x 2 x float> %vbext, <vscale x 2 x float> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
290 ret <vscale x 2 x float> %v
293 declare <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, i32)
294 declare <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, i32)
295 declare <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half>, <vscale x 4 x i1>, i32)
; Masked VV case, nxv4f32: result now occupies a register group (m1/m2), so
; the checks use even-numbered group registers and vmv2r.v / vmv.v.v moves.
297 define <vscale x 4 x float> @vfnmsac_vv_nxv4f32(<vscale x 4 x half> %a, <vscale x 4 x half> %b, <vscale x 4 x float> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
298 ; ZVFH-LABEL: vfnmsac_vv_nxv4f32:
300 ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
301 ; ZVFH-NEXT: vfwnmsac.vv v10, v8, v9, v0.t
302 ; ZVFH-NEXT: vmv2r.v v8, v10
305 ; ZVFHMIN-LABEL: vfnmsac_vv_nxv4f32:
307 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
308 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8, v0.t
309 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9, v0.t
310 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
311 ; ZVFHMIN-NEXT: vfnmsub.vv v12, v14, v10, v0.t
312 ; ZVFHMIN-NEXT: vmv.v.v v8, v12
314 %aext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %m, i32 %evl)
315 %bext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
316 %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %aext, <vscale x 4 x i1> %m, i32 %evl)
317 %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %bext, <vscale x 4 x float> %c, <vscale x 4 x i1> %m, i32 %evl)
318 ret <vscale x 4 x float> %v
; Unmasked nxv4f32 VV case (all-true splat mask).
321 define <vscale x 4 x float> @vfnmsac_vv_nxv4f32_unmasked(<vscale x 4 x half> %a, <vscale x 4 x half> %b, <vscale x 4 x float> %c, i32 zeroext %evl) {
322 ; ZVFH-LABEL: vfnmsac_vv_nxv4f32_unmasked:
324 ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
325 ; ZVFH-NEXT: vfwnmsac.vv v10, v8, v9
326 ; ZVFH-NEXT: vmv2r.v v8, v10
329 ; ZVFHMIN-LABEL: vfnmsac_vv_nxv4f32_unmasked:
331 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
332 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v8
333 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v9
334 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
335 ; ZVFHMIN-NEXT: vfnmsub.vv v12, v14, v10
336 ; ZVFHMIN-NEXT: vmv.v.v v8, v12
338 %aext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
339 %bext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
340 %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %aext, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
341 %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %bext, <vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
342 ret <vscale x 4 x float> %v
; Masked VF case, nxv4f32: scalar splat at m2, then the widening fold;
; +zvfhmin uses an m2 register group for the converted operands.
345 define <vscale x 4 x float> @vfnmsac_vf_nxv4f32(<vscale x 4 x half> %a, half %b, <vscale x 4 x float> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
346 ; ZVFH-LABEL: vfnmsac_vf_nxv4f32:
348 ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
349 ; ZVFH-NEXT: vfwnmsac.vf v10, fa0, v8, v0.t
350 ; ZVFH-NEXT: vmv2r.v v8, v10
353 ; ZVFHMIN-LABEL: vfnmsac_vf_nxv4f32:
355 ; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
356 ; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m2, ta, ma
357 ; ZVFHMIN-NEXT: vfmv.v.f v12, fa5
358 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
359 ; ZVFHMIN-NEXT: vfncvt.f.f.w v14, v12
360 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
361 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8, v0.t
362 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v14, v0.t
363 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
364 ; ZVFHMIN-NEXT: vfnmsub.vv v8, v12, v10, v0.t
366 %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
367 %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
368 %aext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %m, i32 %evl)
369 %vbext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
370 %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %aext, <vscale x 4 x i1> %m, i32 %evl)
371 %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %vbext, <vscale x 4 x float> %c, <vscale x 4 x i1> %m, i32 %evl)
372 ret <vscale x 4 x float> %v
; Commuted VF case, nxv4f32: fma multiplicands swapped (%vbext * %nega).
375 define <vscale x 4 x float> @vfnmsac_vf_nxv4f32_commute(<vscale x 4 x half> %a, half %b, <vscale x 4 x float> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
376 ; ZVFH-LABEL: vfnmsac_vf_nxv4f32_commute:
378 ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
379 ; ZVFH-NEXT: vfwnmsac.vf v10, fa0, v8, v0.t
380 ; ZVFH-NEXT: vmv2r.v v8, v10
383 ; ZVFHMIN-LABEL: vfnmsac_vf_nxv4f32_commute:
385 ; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
386 ; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m2, ta, ma
387 ; ZVFHMIN-NEXT: vfmv.v.f v12, fa5
388 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
389 ; ZVFHMIN-NEXT: vfncvt.f.f.w v9, v12
390 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
391 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8, v0.t
392 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v9, v0.t
393 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
394 ; ZVFHMIN-NEXT: vfnmsub.vv v12, v14, v10, v0.t
395 ; ZVFHMIN-NEXT: vmv.v.v v8, v12
397 %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
398 %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
399 %aext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> %m, i32 %evl)
400 %vbext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
401 %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %aext, <vscale x 4 x i1> %m, i32 %evl)
402 %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %vbext, <vscale x 4 x float> %nega, <vscale x 4 x float> %c, <vscale x 4 x i1> %m, i32 %evl)
403 ret <vscale x 4 x float> %v
; Unmasked VF case, nxv4f32 (all-true splat mask): no v0.t operands expected.
406 define <vscale x 4 x float> @vfnmsac_vf_nxv4f32_unmasked(<vscale x 4 x half> %a, half %b, <vscale x 4 x float> %c, i32 zeroext %evl) {
407 ; ZVFH-LABEL: vfnmsac_vf_nxv4f32_unmasked:
409 ; ZVFH-NEXT: vsetvli zero, a0, e16, m1, ta, ma
410 ; ZVFH-NEXT: vfwnmsac.vf v10, fa0, v8
411 ; ZVFH-NEXT: vmv2r.v v8, v10
414 ; ZVFHMIN-LABEL: vfnmsac_vf_nxv4f32_unmasked:
416 ; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
417 ; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m2, ta, ma
418 ; ZVFHMIN-NEXT: vfmv.v.f v12, fa5
419 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m1, ta, ma
420 ; ZVFHMIN-NEXT: vfncvt.f.f.w v14, v12
421 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m1, ta, ma
422 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v12, v8
423 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v14
424 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
425 ; ZVFHMIN-NEXT: vfnmsub.vv v8, v12, v10
427 %elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
428 %vb = shufflevector <vscale x 4 x half> %elt.head, <vscale x 4 x half> poison, <vscale x 4 x i32> zeroinitializer
429 %aext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
430 %vbext = call <vscale x 4 x float> @llvm.vp.fpext.nxv4f32.nxv4f16(<vscale x 4 x half> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
431 %nega = call <vscale x 4 x float> @llvm.vp.fneg.nxv4f32(<vscale x 4 x float> %aext, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
432 %v = call <vscale x 4 x float> @llvm.vp.fma.nxv4f32(<vscale x 4 x float> %nega, <vscale x 4 x float> %vbext, <vscale x 4 x float> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
433 ret <vscale x 4 x float> %v
436 declare <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x i1>, i32)
437 declare <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float>, <vscale x 8 x i1>, i32)
438 declare <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, i32)
; Masked VV case, nxv8f32 (m2 halves widening to m4 result).
440 define <vscale x 8 x float> @vfnmsac_vv_nxv8f32(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x float> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
441 ; ZVFH-LABEL: vfnmsac_vv_nxv8f32:
443 ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
444 ; ZVFH-NEXT: vfwnmsac.vv v12, v8, v10, v0.t
445 ; ZVFH-NEXT: vmv4r.v v8, v12
448 ; ZVFHMIN-LABEL: vfnmsac_vv_nxv8f32:
450 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
451 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8, v0.t
452 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10, v0.t
453 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
454 ; ZVFHMIN-NEXT: vfnmsub.vv v16, v20, v12, v0.t
455 ; ZVFHMIN-NEXT: vmv.v.v v8, v16
457 %aext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %m, i32 %evl)
458 %bext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
459 %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %aext, <vscale x 8 x i1> %m, i32 %evl)
460 %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %bext, <vscale x 8 x float> %c, <vscale x 8 x i1> %m, i32 %evl)
461 ret <vscale x 8 x float> %v
; Unmasked nxv8f32 VV case (all-true splat mask).
464 define <vscale x 8 x float> @vfnmsac_vv_nxv8f32_unmasked(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x float> %c, i32 zeroext %evl) {
465 ; ZVFH-LABEL: vfnmsac_vv_nxv8f32_unmasked:
467 ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
468 ; ZVFH-NEXT: vfwnmsac.vv v12, v8, v10
469 ; ZVFH-NEXT: vmv4r.v v8, v12
472 ; ZVFHMIN-LABEL: vfnmsac_vv_nxv8f32_unmasked:
474 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
475 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v8
476 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v10
477 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
478 ; ZVFHMIN-NEXT: vfnmsub.vv v16, v20, v12
479 ; ZVFHMIN-NEXT: vmv.v.v v8, v16
481 %aext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
482 %bext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
483 %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %aext, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
484 %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %bext, <vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
485 ret <vscale x 8 x float> %v
; Masked VF case, nxv8f32: scalar splat at m4, widening fold as above.
488 define <vscale x 8 x float> @vfnmsac_vf_nxv8f32(<vscale x 8 x half> %a, half %b, <vscale x 8 x float> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
489 ; ZVFH-LABEL: vfnmsac_vf_nxv8f32:
491 ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
492 ; ZVFH-NEXT: vfwnmsac.vf v12, fa0, v8, v0.t
493 ; ZVFH-NEXT: vmv4r.v v8, v12
496 ; ZVFHMIN-LABEL: vfnmsac_vf_nxv8f32:
498 ; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
499 ; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m4, ta, ma
500 ; ZVFHMIN-NEXT: vfmv.v.f v16, fa5
501 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
502 ; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v16
503 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
504 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8, v0.t
505 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20, v0.t
506 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
507 ; ZVFHMIN-NEXT: vfnmsub.vv v8, v16, v12, v0.t
509 %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
510 %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
511 %aext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %m, i32 %evl)
512 %vbext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
513 %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %aext, <vscale x 8 x i1> %m, i32 %evl)
514 %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %vbext, <vscale x 8 x float> %c, <vscale x 8 x i1> %m, i32 %evl)
515 ret <vscale x 8 x float> %v
; Commuted VF case, nxv8f32: fma multiplicands swapped (%vbext * %nega).
518 define <vscale x 8 x float> @vfnmsac_vf_nxv8f32_commute(<vscale x 8 x half> %a, half %b, <vscale x 8 x float> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
519 ; ZVFH-LABEL: vfnmsac_vf_nxv8f32_commute:
521 ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
522 ; ZVFH-NEXT: vfwnmsac.vf v12, fa0, v8, v0.t
523 ; ZVFH-NEXT: vmv4r.v v8, v12
526 ; ZVFHMIN-LABEL: vfnmsac_vf_nxv8f32_commute:
528 ; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
529 ; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m4, ta, ma
530 ; ZVFHMIN-NEXT: vfmv.v.f v16, fa5
531 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
532 ; ZVFHMIN-NEXT: vfncvt.f.f.w v10, v16
533 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
534 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8, v0.t
535 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v10, v0.t
536 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
537 ; ZVFHMIN-NEXT: vfnmsub.vv v16, v20, v12, v0.t
538 ; ZVFHMIN-NEXT: vmv.v.v v8, v16
540 %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
541 %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
542 %aext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> %m, i32 %evl)
543 %vbext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
544 %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %aext, <vscale x 8 x i1> %m, i32 %evl)
545 %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %vbext, <vscale x 8 x float> %nega, <vscale x 8 x float> %c, <vscale x 8 x i1> %m, i32 %evl)
546 ret <vscale x 8 x float> %v
; Unmasked VF case, nxv8f32 (all-true splat mask): no v0.t operands expected.
549 define <vscale x 8 x float> @vfnmsac_vf_nxv8f32_unmasked(<vscale x 8 x half> %a, half %b, <vscale x 8 x float> %c, i32 zeroext %evl) {
550 ; ZVFH-LABEL: vfnmsac_vf_nxv8f32_unmasked:
552 ; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
553 ; ZVFH-NEXT: vfwnmsac.vf v12, fa0, v8
554 ; ZVFH-NEXT: vmv4r.v v8, v12
557 ; ZVFHMIN-LABEL: vfnmsac_vf_nxv8f32_unmasked:
559 ; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
560 ; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m4, ta, ma
561 ; ZVFHMIN-NEXT: vfmv.v.f v16, fa5
562 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m2, ta, ma
563 ; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v16
564 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m2, ta, ma
565 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
566 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
567 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
568 ; ZVFHMIN-NEXT: vfnmsub.vv v8, v16, v12
570 %elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
571 %vb = shufflevector <vscale x 8 x half> %elt.head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
572 %aext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
573 %vbext = call <vscale x 8 x float> @llvm.vp.fpext.nxv8f32.nxv8f16(<vscale x 8 x half> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
574 %nega = call <vscale x 8 x float> @llvm.vp.fneg.nxv8f32(<vscale x 8 x float> %aext, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
575 %v = call <vscale x 8 x float> @llvm.vp.fma.nxv8f32(<vscale x 8 x float> %nega, <vscale x 8 x float> %vbext, <vscale x 8 x float> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
576 ret <vscale x 8 x float> %v
579 declare <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x i1>, i32)
580 declare <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float>, <vscale x 16 x i1>, i32)
581 declare <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half>, <vscale x 16 x i1>, i32)
; Masked VV case at the largest type, nxv16f32: the m8 result exhausts the
; register file under +zvfhmin, so the checks also cover the frame setup and
; an 8*vlenb stack spill/reload of the v16 register group around the converts.
583 define <vscale x 16 x float> @vfnmsac_vv_nxv16f32(<vscale x 16 x half> %a, <vscale x 16 x half> %b, <vscale x 16 x float> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
584 ; ZVFH-LABEL: vfnmsac_vv_nxv16f32:
586 ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
587 ; ZVFH-NEXT: vfwnmsac.vv v16, v8, v12, v0.t
588 ; ZVFH-NEXT: vmv8r.v v8, v16
591 ; ZVFHMIN-LABEL: vfnmsac_vv_nxv16f32:
593 ; ZVFHMIN-NEXT: addi sp, sp, -16
594 ; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
595 ; ZVFHMIN-NEXT: csrr a1, vlenb
596 ; ZVFHMIN-NEXT: slli a1, a1, 3
597 ; ZVFHMIN-NEXT: sub sp, sp, a1
598 ; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
599 ; ZVFHMIN-NEXT: addi a1, sp, 16
600 ; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
601 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
602 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8, v0.t
603 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
604 ; ZVFHMIN-NEXT: addi a0, sp, 16
605 ; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
606 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
607 ; ZVFHMIN-NEXT: vfnmsub.vv v24, v16, v8, v0.t
608 ; ZVFHMIN-NEXT: vmv.v.v v8, v24
609 ; ZVFHMIN-NEXT: csrr a0, vlenb
610 ; ZVFHMIN-NEXT: slli a0, a0, 3
611 ; ZVFHMIN-NEXT: add sp, sp, a0
612 ; ZVFHMIN-NEXT: addi sp, sp, 16
614 %aext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %m, i32 %evl)
615 %bext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
616 %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %aext, <vscale x 16 x i1> %m, i32 %evl)
617 %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %bext, <vscale x 16 x float> %c, <vscale x 16 x i1> %m, i32 %evl)
618 ret <vscale x 16 x float> %v
; Unmasked vv variant: with no mask live, ZVFHMIN can use v0 as scratch for the
; widened operand, so no stack spill is required.
621 define <vscale x 16 x float> @vfnmsac_vv_nxv16f32_unmasked(<vscale x 16 x half> %a, <vscale x 16 x half> %b, <vscale x 16 x float> %c, i32 zeroext %evl) {
622 ; ZVFH-LABEL: vfnmsac_vv_nxv16f32_unmasked:
624 ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
625 ; ZVFH-NEXT: vfwnmsac.vv v16, v8, v12
626 ; ZVFH-NEXT: vmv8r.v v8, v16
629 ; ZVFHMIN-LABEL: vfnmsac_vv_nxv16f32_unmasked:
631 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
632 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
633 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
634 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
635 ; ZVFHMIN-NEXT: vfnmsub.vv v24, v0, v16
636 ; ZVFHMIN-NEXT: vmv.v.v v8, v24
638 %aext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
639 %bext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %b, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
640 %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %aext, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
641 %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %bext, <vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
642 ret <vscale x 16 x float> %v
; Masked vf (scalar-splat) form. ZVFHMIN extends the f16 scalar with fcvt.s.h,
; splats it at e32, narrows it back to f16 (vfncvt into v4), then widens both
; operands under the mask before the e32 vfnmsub.
645 define <vscale x 16 x float> @vfnmsac_vf_nxv16f32(<vscale x 16 x half> %a, half %b, <vscale x 16 x float> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
646 ; ZVFH-LABEL: vfnmsac_vf_nxv16f32:
648 ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
649 ; ZVFH-NEXT: vfwnmsac.vf v16, fa0, v8, v0.t
650 ; ZVFH-NEXT: vmv8r.v v8, v16
653 ; ZVFHMIN-LABEL: vfnmsac_vf_nxv16f32:
655 ; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
656 ; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m8, ta, ma
657 ; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
658 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
659 ; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
660 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
661 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8, v0.t
662 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4, v0.t
663 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
664 ; ZVFHMIN-NEXT: vfnmsub.vv v8, v24, v16, v0.t
666 %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
667 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
668 %aext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %m, i32 %evl)
669 %vbext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
670 %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %aext, <vscale x 16 x i1> %m, i32 %evl)
671 %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %vbext, <vscale x 16 x float> %c, <vscale x 16 x i1> %m, i32 %evl)
672 ret <vscale x 16 x float> %v
; vf form with the fma operands commuted (%vbext first): ZVFH still selects the
; same vfwnmsac.vf; ZVFHMIN differs only in the vfnmsub destination (v24) plus
; a final vmv.v.v back into v8.
675 define <vscale x 16 x float> @vfnmsac_vf_nxv16f32_commute(<vscale x 16 x half> %a, half %b, <vscale x 16 x float> %c, <vscale x 16 x i1> %m, i32 zeroext %evl) {
676 ; ZVFH-LABEL: vfnmsac_vf_nxv16f32_commute:
678 ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
679 ; ZVFH-NEXT: vfwnmsac.vf v16, fa0, v8, v0.t
680 ; ZVFH-NEXT: vmv8r.v v8, v16
683 ; ZVFHMIN-LABEL: vfnmsac_vf_nxv16f32_commute:
685 ; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
686 ; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m8, ta, ma
687 ; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
688 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
689 ; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
690 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
691 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8, v0.t
692 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4, v0.t
693 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
694 ; ZVFHMIN-NEXT: vfnmsub.vv v24, v8, v16, v0.t
695 ; ZVFHMIN-NEXT: vmv.v.v v8, v24
697 %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
698 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
699 %aext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> %m, i32 %evl)
700 %vbext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
701 %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %aext, <vscale x 16 x i1> %m, i32 %evl)
702 %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %vbext, <vscale x 16 x float> %nega, <vscale x 16 x float> %c, <vscale x 16 x i1> %m, i32 %evl)
703 ret <vscale x 16 x float> %v
; Unmasked vf form: same splat-narrow-widen sequence as the masked vf test,
; with all mask (v0.t) operands dropped.
706 define <vscale x 16 x float> @vfnmsac_vf_nxv16f32_unmasked(<vscale x 16 x half> %a, half %b, <vscale x 16 x float> %c, i32 zeroext %evl) {
707 ; ZVFH-LABEL: vfnmsac_vf_nxv16f32_unmasked:
709 ; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
710 ; ZVFH-NEXT: vfwnmsac.vf v16, fa0, v8
711 ; ZVFH-NEXT: vmv8r.v v8, v16
714 ; ZVFHMIN-LABEL: vfnmsac_vf_nxv16f32_unmasked:
716 ; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
717 ; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m8, ta, ma
718 ; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
719 ; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
720 ; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
721 ; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
722 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
723 ; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4
724 ; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
725 ; ZVFHMIN-NEXT: vfnmsub.vv v8, v24, v16
727 %elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
728 %vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
729 %aext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %a, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
730 %vbext = call <vscale x 16 x float> @llvm.vp.fpext.nxv16f32.nxv16f16(<vscale x 16 x half> %vb, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
731 %nega = call <vscale x 16 x float> @llvm.vp.fneg.nxv16f32(<vscale x 16 x float> %aext, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
732 %v = call <vscale x 16 x float> @llvm.vp.fma.nxv16f32(<vscale x 16 x float> %nega, <vscale x 16 x float> %vbext, <vscale x 16 x float> %c, <vscale x 16 x i1> splat (i1 -1), i32 %evl)
733 ret <vscale x 16 x float> %v
; VP intrinsic declarations for the f32 -> f64 (nxv1f64) tests. From here on the
; source element type is f32, so ZVFH and ZVFHMIN share a common CHECK prefix.
736 declare <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x i1>, i32)
737 declare <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double>, <vscale x 1 x i1>, i32)
738 declare <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float>, <vscale x 1 x i1>, i32)
; Masked vv, nxv1f32 -> nxv1f64: folds to a single vfwnmsac.vv (common CHECK).
740 define <vscale x 1 x double> @vfnmsac_vv_nxv1f64(<vscale x 1 x float> %a, <vscale x 1 x float> %b, <vscale x 1 x double> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
741 ; CHECK-LABEL: vfnmsac_vv_nxv1f64:
743 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
744 ; CHECK-NEXT: vfwnmsac.vv v10, v8, v9, v0.t
745 ; CHECK-NEXT: vmv1r.v v8, v10
747 %aext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %m, i32 %evl)
748 %bext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl)
749 %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %aext, <vscale x 1 x i1> %m, i32 %evl)
750 %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %bext, <vscale x 1 x double> %c, <vscale x 1 x i1> %m, i32 %evl)
751 ret <vscale x 1 x double> %v
; Unmasked vv, nxv1f32 -> nxv1f64 (all-ones mask splat in the IR).
754 define <vscale x 1 x double> @vfnmsac_vv_nxv1f64_unmasked(<vscale x 1 x float> %a, <vscale x 1 x float> %b, <vscale x 1 x double> %c, i32 zeroext %evl) {
755 ; CHECK-LABEL: vfnmsac_vv_nxv1f64_unmasked:
757 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
758 ; CHECK-NEXT: vfwnmsac.vv v10, v8, v9
759 ; CHECK-NEXT: vmv1r.v v8, v10
761 %aext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
762 %bext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %b, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
763 %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %aext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
764 %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %bext, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
765 ret <vscale x 1 x double> %v
; Masked vf: scalar f32 splat folds into vfwnmsac.vf.
768 define <vscale x 1 x double> @vfnmsac_vf_nxv1f64(<vscale x 1 x float> %a, float %b, <vscale x 1 x double> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
769 ; CHECK-LABEL: vfnmsac_vf_nxv1f64:
771 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
772 ; CHECK-NEXT: vfwnmsac.vf v9, fa0, v8, v0.t
773 ; CHECK-NEXT: vmv1r.v v8, v9
775 %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
776 %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
777 %aext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %m, i32 %evl)
778 %vbext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
779 %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %aext, <vscale x 1 x i1> %m, i32 %evl)
780 %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %vbext, <vscale x 1 x double> %c, <vscale x 1 x i1> %m, i32 %evl)
781 ret <vscale x 1 x double> %v
; Commuted fma operand order (%vbext first) still selects the same vfwnmsac.vf.
784 define <vscale x 1 x double> @vfnmsac_vf_nxv1f64_commute(<vscale x 1 x float> %a, float %b, <vscale x 1 x double> %c, <vscale x 1 x i1> %m, i32 zeroext %evl) {
785 ; CHECK-LABEL: vfnmsac_vf_nxv1f64_commute:
787 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
788 ; CHECK-NEXT: vfwnmsac.vf v9, fa0, v8, v0.t
789 ; CHECK-NEXT: vmv1r.v v8, v9
791 %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
792 %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
793 %aext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> %m, i32 %evl)
794 %vbext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
795 %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %aext, <vscale x 1 x i1> %m, i32 %evl)
796 %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %vbext, <vscale x 1 x double> %nega, <vscale x 1 x double> %c, <vscale x 1 x i1> %m, i32 %evl)
797 ret <vscale x 1 x double> %v
; Unmasked vf form for nxv1f64.
800 define <vscale x 1 x double> @vfnmsac_vf_nxv1f64_unmasked(<vscale x 1 x float> %a, float %b, <vscale x 1 x double> %c, i32 zeroext %evl) {
801 ; CHECK-LABEL: vfnmsac_vf_nxv1f64_unmasked:
803 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
804 ; CHECK-NEXT: vfwnmsac.vf v9, fa0, v8
805 ; CHECK-NEXT: vmv1r.v v8, v9
807 %elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
808 %vb = shufflevector <vscale x 1 x float> %elt.head, <vscale x 1 x float> poison, <vscale x 1 x i32> zeroinitializer
809 %aext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
810 %vbext = call <vscale x 1 x double> @llvm.vp.fpext.nxv1f64.nxv1f32(<vscale x 1 x float> %vb, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
811 %nega = call <vscale x 1 x double> @llvm.vp.fneg.nxv1f64(<vscale x 1 x double> %aext, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
812 %v = call <vscale x 1 x double> @llvm.vp.fma.nxv1f64(<vscale x 1 x double> %nega, <vscale x 1 x double> %vbext, <vscale x 1 x double> %c, <vscale x 1 x i1> splat (i1 -1), i32 %evl)
813 ret <vscale x 1 x double> %v
; VP intrinsic declarations for the nxv2f64 tests.
816 declare <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, i32)
817 declare <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, i32)
818 declare <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float>, <vscale x 2 x i1>, i32)
; Masked vv at e32/m1 source, m2 result (note vmv2r.v for the wider group).
820 define <vscale x 2 x double> @vfnmsac_vv_nxv2f64(<vscale x 2 x float> %a, <vscale x 2 x float> %b, <vscale x 2 x double> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
821 ; CHECK-LABEL: vfnmsac_vv_nxv2f64:
823 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
824 ; CHECK-NEXT: vfwnmsac.vv v10, v8, v9, v0.t
825 ; CHECK-NEXT: vmv2r.v v8, v10
827 %aext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %m, i32 %evl)
828 %bext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl)
829 %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %aext, <vscale x 2 x i1> %m, i32 %evl)
830 %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %bext, <vscale x 2 x double> %c, <vscale x 2 x i1> %m, i32 %evl)
831 ret <vscale x 2 x double> %v
; Unmasked vv for nxv2f64.
834 define <vscale x 2 x double> @vfnmsac_vv_nxv2f64_unmasked(<vscale x 2 x float> %a, <vscale x 2 x float> %b, <vscale x 2 x double> %c, i32 zeroext %evl) {
835 ; CHECK-LABEL: vfnmsac_vv_nxv2f64_unmasked:
837 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
838 ; CHECK-NEXT: vfwnmsac.vv v10, v8, v9
839 ; CHECK-NEXT: vmv2r.v v8, v10
841 %aext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
842 %bext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %b, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
843 %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %aext, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
844 %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %bext, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
845 ret <vscale x 2 x double> %v
; Masked vf for nxv2f64.
848 define <vscale x 2 x double> @vfnmsac_vf_nxv2f64(<vscale x 2 x float> %a, float %b, <vscale x 2 x double> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
849 ; CHECK-LABEL: vfnmsac_vf_nxv2f64:
851 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
852 ; CHECK-NEXT: vfwnmsac.vf v10, fa0, v8, v0.t
853 ; CHECK-NEXT: vmv2r.v v8, v10
855 %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
856 %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
857 %aext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %m, i32 %evl)
858 %vbext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
859 %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %aext, <vscale x 2 x i1> %m, i32 %evl)
860 %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %vbext, <vscale x 2 x double> %c, <vscale x 2 x i1> %m, i32 %evl)
861 ret <vscale x 2 x double> %v
; Commuted fma operand order; same vfwnmsac.vf selection.
864 define <vscale x 2 x double> @vfnmsac_vf_nxv2f64_commute(<vscale x 2 x float> %a, float %b, <vscale x 2 x double> %c, <vscale x 2 x i1> %m, i32 zeroext %evl) {
865 ; CHECK-LABEL: vfnmsac_vf_nxv2f64_commute:
867 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
868 ; CHECK-NEXT: vfwnmsac.vf v10, fa0, v8, v0.t
869 ; CHECK-NEXT: vmv2r.v v8, v10
871 %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
872 %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
873 %aext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> %m, i32 %evl)
874 %vbext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
875 %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %aext, <vscale x 2 x i1> %m, i32 %evl)
876 %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %vbext, <vscale x 2 x double> %nega, <vscale x 2 x double> %c, <vscale x 2 x i1> %m, i32 %evl)
877 ret <vscale x 2 x double> %v
; Unmasked vf for nxv2f64.
880 define <vscale x 2 x double> @vfnmsac_vf_nxv2f64_unmasked(<vscale x 2 x float> %a, float %b, <vscale x 2 x double> %c, i32 zeroext %evl) {
881 ; CHECK-LABEL: vfnmsac_vf_nxv2f64_unmasked:
883 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
884 ; CHECK-NEXT: vfwnmsac.vf v10, fa0, v8
885 ; CHECK-NEXT: vmv2r.v v8, v10
887 %elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
888 %vb = shufflevector <vscale x 2 x float> %elt.head, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
889 %aext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
890 %vbext = call <vscale x 2 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<vscale x 2 x float> %vb, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
891 %nega = call <vscale x 2 x double> @llvm.vp.fneg.nxv2f64(<vscale x 2 x double> %aext, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
892 %v = call <vscale x 2 x double> @llvm.vp.fma.nxv2f64(<vscale x 2 x double> %nega, <vscale x 2 x double> %vbext, <vscale x 2 x double> %c, <vscale x 2 x i1> splat (i1 -1), i32 %evl)
893 ret <vscale x 2 x double> %v
; VP intrinsic declarations for the nxv4f64 tests.
896 declare <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x i1>, i32)
897 declare <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double>, <vscale x 4 x i1>, i32)
898 declare <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, i32)
; Masked vv at e32/m2 source, m4 result (vmv4r.v).
900 define <vscale x 4 x double> @vfnmsac_vv_nxv4f64(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x double> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
901 ; CHECK-LABEL: vfnmsac_vv_nxv4f64:
903 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
904 ; CHECK-NEXT: vfwnmsac.vv v12, v8, v10, v0.t
905 ; CHECK-NEXT: vmv4r.v v8, v12
907 %aext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %m, i32 %evl)
908 %bext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl)
909 %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %aext, <vscale x 4 x i1> %m, i32 %evl)
910 %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %bext, <vscale x 4 x double> %c, <vscale x 4 x i1> %m, i32 %evl)
911 ret <vscale x 4 x double> %v
; Unmasked vv for nxv4f64.
914 define <vscale x 4 x double> @vfnmsac_vv_nxv4f64_unmasked(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x double> %c, i32 zeroext %evl) {
915 ; CHECK-LABEL: vfnmsac_vv_nxv4f64_unmasked:
917 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
918 ; CHECK-NEXT: vfwnmsac.vv v12, v8, v10
919 ; CHECK-NEXT: vmv4r.v v8, v12
921 %aext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
922 %bext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %b, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
923 %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %aext, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
924 %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %bext, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
925 ret <vscale x 4 x double> %v
; Masked vf for nxv4f64.
928 define <vscale x 4 x double> @vfnmsac_vf_nxv4f64(<vscale x 4 x float> %a, float %b, <vscale x 4 x double> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
929 ; CHECK-LABEL: vfnmsac_vf_nxv4f64:
931 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
932 ; CHECK-NEXT: vfwnmsac.vf v12, fa0, v8, v0.t
933 ; CHECK-NEXT: vmv4r.v v8, v12
935 %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
936 %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
937 %aext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %m, i32 %evl)
938 %vbext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
939 %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %aext, <vscale x 4 x i1> %m, i32 %evl)
940 %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %vbext, <vscale x 4 x double> %c, <vscale x 4 x i1> %m, i32 %evl)
941 ret <vscale x 4 x double> %v
; Commuted fma operand order; same vfwnmsac.vf selection.
944 define <vscale x 4 x double> @vfnmsac_vf_nxv4f64_commute(<vscale x 4 x float> %a, float %b, <vscale x 4 x double> %c, <vscale x 4 x i1> %m, i32 zeroext %evl) {
945 ; CHECK-LABEL: vfnmsac_vf_nxv4f64_commute:
947 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
948 ; CHECK-NEXT: vfwnmsac.vf v12, fa0, v8, v0.t
949 ; CHECK-NEXT: vmv4r.v v8, v12
951 %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
952 %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
953 %aext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> %m, i32 %evl)
954 %vbext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
955 %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %aext, <vscale x 4 x i1> %m, i32 %evl)
956 %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %vbext, <vscale x 4 x double> %nega, <vscale x 4 x double> %c, <vscale x 4 x i1> %m, i32 %evl)
957 ret <vscale x 4 x double> %v
; Unmasked vf for nxv4f64.
960 define <vscale x 4 x double> @vfnmsac_vf_nxv4f64_unmasked(<vscale x 4 x float> %a, float %b, <vscale x 4 x double> %c, i32 zeroext %evl) {
961 ; CHECK-LABEL: vfnmsac_vf_nxv4f64_unmasked:
963 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
964 ; CHECK-NEXT: vfwnmsac.vf v12, fa0, v8
965 ; CHECK-NEXT: vmv4r.v v8, v12
967 %elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
968 %vb = shufflevector <vscale x 4 x float> %elt.head, <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
969 %aext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
970 %vbext = call <vscale x 4 x double> @llvm.vp.fpext.nxv4f64.nxv4f32(<vscale x 4 x float> %vb, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
971 %nega = call <vscale x 4 x double> @llvm.vp.fneg.nxv4f64(<vscale x 4 x double> %aext, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
972 %v = call <vscale x 4 x double> @llvm.vp.fma.nxv4f64(<vscale x 4 x double> %nega, <vscale x 4 x double> %vbext, <vscale x 4 x double> %c, <vscale x 4 x i1> splat (i1 -1), i32 %evl)
973 ret <vscale x 4 x double> %v
; VP intrinsic declarations for the nxv8f64 tests.
976 declare <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x i1>, i32)
977 declare <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double>, <vscale x 8 x i1>, i32)
978 declare <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float>, <vscale x 8 x i1>, i32)
; Masked vv at e32/m4 source, m8 result (vmv8r.v).
980 define <vscale x 8 x double> @vfnmsac_vv_nxv8f64(<vscale x 8 x float> %a, <vscale x 8 x float> %b, <vscale x 8 x double> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
981 ; CHECK-LABEL: vfnmsac_vv_nxv8f64:
983 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
984 ; CHECK-NEXT: vfwnmsac.vv v16, v8, v12, v0.t
985 ; CHECK-NEXT: vmv8r.v v8, v16
987 %aext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %m, i32 %evl)
988 %bext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl)
989 %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %aext, <vscale x 8 x i1> %m, i32 %evl)
990 %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %bext, <vscale x 8 x double> %c, <vscale x 8 x i1> %m, i32 %evl)
991 ret <vscale x 8 x double> %v
; Unmasked vv for nxv8f64.
994 define <vscale x 8 x double> @vfnmsac_vv_nxv8f64_unmasked(<vscale x 8 x float> %a, <vscale x 8 x float> %b, <vscale x 8 x double> %c, i32 zeroext %evl) {
995 ; CHECK-LABEL: vfnmsac_vv_nxv8f64_unmasked:
997 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
998 ; CHECK-NEXT: vfwnmsac.vv v16, v8, v12
999 ; CHECK-NEXT: vmv8r.v v8, v16
1001 %aext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
1002 %bext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %b, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
1003 %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %aext, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
1004 %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %bext, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
1005 ret <vscale x 8 x double> %v
; Masked vf for nxv8f64.
1008 define <vscale x 8 x double> @vfnmsac_vf_nxv8f64(<vscale x 8 x float> %a, float %b, <vscale x 8 x double> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1009 ; CHECK-LABEL: vfnmsac_vf_nxv8f64:
1011 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
1012 ; CHECK-NEXT: vfwnmsac.vf v16, fa0, v8, v0.t
1013 ; CHECK-NEXT: vmv8r.v v8, v16
1015 %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
1016 %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
1017 %aext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %m, i32 %evl)
1018 %vbext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
1019 %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %aext, <vscale x 8 x i1> %m, i32 %evl)
1020 %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %vbext, <vscale x 8 x double> %c, <vscale x 8 x i1> %m, i32 %evl)
1021 ret <vscale x 8 x double> %v
; Commuted fma operand order; same vfwnmsac.vf selection.
1024 define <vscale x 8 x double> @vfnmsac_vf_nxv8f64_commute(<vscale x 8 x float> %a, float %b, <vscale x 8 x double> %c, <vscale x 8 x i1> %m, i32 zeroext %evl) {
1025 ; CHECK-LABEL: vfnmsac_vf_nxv8f64_commute:
1027 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
1028 ; CHECK-NEXT: vfwnmsac.vf v16, fa0, v8, v0.t
1029 ; CHECK-NEXT: vmv8r.v v8, v16
1031 %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
1032 %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
1033 %aext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> %m, i32 %evl)
1034 %vbext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
1035 %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %aext, <vscale x 8 x i1> %m, i32 %evl)
1036 %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %vbext, <vscale x 8 x double> %nega, <vscale x 8 x double> %c, <vscale x 8 x i1> %m, i32 %evl)
1037 ret <vscale x 8 x double> %v
; Unmasked vf for nxv8f64.
1040 define <vscale x 8 x double> @vfnmsac_vf_nxv8f64_unmasked(<vscale x 8 x float> %a, float %b, <vscale x 8 x double> %c, i32 zeroext %evl) {
1041 ; CHECK-LABEL: vfnmsac_vf_nxv8f64_unmasked:
1043 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
1044 ; CHECK-NEXT: vfwnmsac.vf v16, fa0, v8
1045 ; CHECK-NEXT: vmv8r.v v8, v16
1047 %elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
1048 %vb = shufflevector <vscale x 8 x float> %elt.head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
1049 %aext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %a, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
1050 %vbext = call <vscale x 8 x double> @llvm.vp.fpext.nxv8f64.nxv8f32(<vscale x 8 x float> %vb, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
1051 %nega = call <vscale x 8 x double> @llvm.vp.fneg.nxv8f64(<vscale x 8 x double> %aext, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
1052 %v = call <vscale x 8 x double> @llvm.vp.fma.nxv8f64(<vscale x 8 x double> %nega, <vscale x 8 x double> %vbext, <vscale x 8 x double> %c, <vscale x 8 x i1> splat (i1 -1), i32 %evl)
1053 ret <vscale x 8 x double> %v