; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64
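
; The llvm.riscv.vfmv.s.f intrinsic writes the scalar FP operand into element 0
; of the destination vector; all other elements are treated as tail elements,
; which is why the checks below all use a tail-undisturbed (tu) vsetvli to
; preserve the contents of the passthru operand.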

declare <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half>, half, iXLen)

define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half> %0, half %1, iXLen %2)
  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half>, half, iXLen)

define <vscale x 2 x half> @intrinsic_vfmv.s.f_f_nxv2f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half> %0, half %1, iXLen %2)
  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16(<vscale x 4 x half>, half, iXLen)

define <vscale x 4 x half> @intrinsic_vfmv.s.f_f_nxv4f16(<vscale x 4 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16(<vscale x 4 x half> %0, half %1, iXLen %2)
  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16(<vscale x 8 x half>, half, iXLen)

define <vscale x 8 x half> @intrinsic_vfmv.s.f_f_nxv8f16(<vscale x 8 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16(<vscale x 8 x half> %0, half %1, iXLen %2)
  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16(<vscale x 16 x half>, half, iXLen)

define <vscale x 16 x half> @intrinsic_vfmv.s.f_f_nxv16f16(<vscale x 16 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16(<vscale x 16 x half> %0, half %1, iXLen %2)
  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16(<vscale x 32 x half>, half, iXLen)

define <vscale x 32 x half> @intrinsic_vfmv.s.f_f_nxv32f16(<vscale x 32 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16(<vscale x 32 x half> %0, half %1, iXLen %2)
  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float>, float, iXLen)

define <vscale x 1 x float> @intrinsic_vfmv.s.f_f_nxv1f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float> %0, float %1, iXLen %2)
  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float>, float, iXLen)

define <vscale x 2 x float> @intrinsic_vfmv.s.f_f_nxv2f32(<vscale x 2 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float> %0, float %1, iXLen %2)
  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32(<vscale x 4 x float>, float, iXLen)

define <vscale x 4 x float> @intrinsic_vfmv.s.f_f_nxv4f32(<vscale x 4 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32(<vscale x 4 x float> %0, float %1, iXLen %2)
  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32(<vscale x 8 x float>, float, iXLen)

define <vscale x 8 x float> @intrinsic_vfmv.s.f_f_nxv8f32(<vscale x 8 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32(<vscale x 8 x float> %0, float %1, iXLen %2)
  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32(<vscale x 16 x float>, float, iXLen)

define <vscale x 16 x float> @intrinsic_vfmv.s.f_f_nxv16f32(<vscale x 16 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32(<vscale x 16 x float> %0, float %1, iXLen %2)
  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double>, double, iXLen)

define <vscale x 1 x double> @intrinsic_vfmv.s.f_f_nxv1f64(<vscale x 1 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double> %0, double %1, iXLen %2)
  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64(<vscale x 2 x double>, double, iXLen)

define <vscale x 2 x double> @intrinsic_vfmv.s.f_f_nxv2f64(<vscale x 2 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64(<vscale x 2 x double> %0, double %1, iXLen %2)
  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64(<vscale x 4 x double>, double, iXLen)

define <vscale x 4 x double> @intrinsic_vfmv.s.f_f_nxv4f64(<vscale x 4 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64(<vscale x 4 x double> %0, double %1, iXLen %2)
  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double>, double, iXLen)

define <vscale x 8 x double> @intrinsic_vfmv.s.f_f_nxv8f64(<vscale x 8 x double> %0, double %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vfmv.s.f v8, fa0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double> %0, double %1, iXLen %2)
  ret <vscale x 8 x double> %a
}
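
; A scalar of +0.0 is all-zeros in every FP format, so the scalar insert is
; expected to lower to an integer vmv.s.x from the zero register rather than
; a floating-point move.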

define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_zero_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half> %0, half 0.0, iXLen %1)
  ret <vscale x 1 x half> %a
}

define <vscale x 2 x half> @intrinsic_vfmv.s.f_f_zero_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half> %0, half 0.0, iXLen %1)
  ret <vscale x 2 x half> %a
}

define <vscale x 4 x half> @intrinsic_vfmv.s.f_f_zero_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16(<vscale x 4 x half> %0, half 0.0, iXLen %1)
  ret <vscale x 4 x half> %a
}

define <vscale x 8 x half> @intrinsic_vfmv.s.f_f_zero_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16(<vscale x 8 x half> %0, half 0.0, iXLen %1)
  ret <vscale x 8 x half> %a
}

define <vscale x 16 x half> @intrinsic_vfmv.s.f_f_zero_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16(<vscale x 16 x half> %0, half 0.0, iXLen %1)
  ret <vscale x 16 x half> %a
}

define <vscale x 32 x half> @intrinsic_vfmv.s.f_f_zero_nxv32f16(<vscale x 32 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16(<vscale x 32 x half> %0, half 0.0, iXLen %1)
  ret <vscale x 32 x half> %a
}

define <vscale x 1 x float> @intrinsic_vfmv.s.f_f_zero_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float> %0, float 0.0, iXLen %1)
  ret <vscale x 1 x float> %a
}

define <vscale x 2 x float> @intrinsic_vfmv.s.f_f_zero_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32(<vscale x 2 x float> %0, float 0.0, iXLen %1)
  ret <vscale x 2 x float> %a
}

define <vscale x 4 x float> @intrinsic_vfmv.s.f_f_zero_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32(<vscale x 4 x float> %0, float 0.0, iXLen %1)
  ret <vscale x 4 x float> %a
}

define <vscale x 8 x float> @intrinsic_vfmv.s.f_f_zero_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32(<vscale x 8 x float> %0, float 0.0, iXLen %1)
  ret <vscale x 8 x float> %a
}

define <vscale x 16 x float> @intrinsic_vfmv.s.f_f_zero_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32(<vscale x 16 x float> %0, float 0.0, iXLen %1)
  ret <vscale x 16 x float> %a
}

define <vscale x 1 x double> @intrinsic_vfmv.s.f_f_zero_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double> %0, double 0.0, iXLen %1)
  ret <vscale x 1 x double> %a
}

define <vscale x 2 x double> @intrinsic_vfmv.s.f_f_zero_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64(<vscale x 2 x double> %0, double 0.0, iXLen %1)
  ret <vscale x 2 x double> %a
}

define <vscale x 4 x double> @intrinsic_vfmv.s.f_f_zero_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64(<vscale x 4 x double> %0, double 0.0, iXLen %1)
  ret <vscale x 4 x double> %a
}

define <vscale x 8 x double> @intrinsic_vfmv.s.f_f_zero_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_zero_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64(<vscale x 8 x double> %0, double 0.0, iXLen %1)
  ret <vscale x 8 x double> %a
}
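
; -0.0 has only the sign bit set, so it cannot reuse the zero register: the
; bit pattern is built in a GPR (lui, or li+slli for f64 on RV64) and inserted
; with vmv.s.x. The exception is f64 on RV32, where the 64-bit pattern does
; not fit in a GPR; there the constant is produced as an FP value with
; fcvt.d.w + fneg.d and inserted with vfmv.s.f.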

define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16_negzero(<vscale x 1 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16_negzero:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lui a1, 1048568
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half> %0, half -0.0, iXLen %1)
  ret <vscale x 1 x half> %a
}

define <vscale x 1 x float> @intrinsic_vfmv.s.f_f_nxv1f32_negzero(<vscale x 1 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f32_negzero:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lui a1, 524288
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float> %0, float -0.0, iXLen %1)
  ret <vscale x 1 x float> %a
}

define <vscale x 1 x double> @intrinsic_vfmv.s.f_f_nxv1f64_negzero(<vscale x 1 x double> %0, iXLen %1) nounwind {
; RV32-LABEL: intrinsic_vfmv.s.f_f_nxv1f64_negzero:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    fcvt.d.w fa5, zero
; RV32-NEXT:    fneg.d fa5, fa5
; RV32-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; RV32-NEXT:    vfmv.s.f v8, fa5
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vfmv.s.f_f_nxv1f64_negzero:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    li a1, -1
; RV64-NEXT:    slli a1, a1, 63
; RV64-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
; RV64-NEXT:    vmv.s.x v8, a1
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double> %0, double -0.0, iXLen %1)
  ret <vscale x 1 x double> %a
}