; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
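
; This file covers the llvm.riscv.vfncvt.xu.f.w intrinsics (narrowing
; float-to-unsigned conversion) for each SEW/LMUL combination, in unmasked and
; masked forms. The iXLen operand following the vector operands is the dynamic
; rounding mode (0 = rne, 1 = rtz in the FRM encoding), which is why codegen
; brackets the conversion with fsrmi/fsrm; the final test requests rtz, so the
; statically rounded vfncvt.rtz.xu.f.w is selected with no FRM save/restore.
; On the masked calls, the trailing iXLen 1 is the tail-agnostic policy
; operand, matching the "ta, mu" vsetvli configuration in the checks.
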
declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
  <vscale x 1 x i8>,
  <vscale x 1 x half>,
  iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vfncvt.xu.f.w v9, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
    <vscale x 1 x i8> undef,
    <vscale x 1 x half> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
  <vscale x 1 x i8>,
  <vscale x 1 x half>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(
    <vscale x 1 x i8> %0,
    <vscale x 1 x half> %1,
    <vscale x 1 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16(
  <vscale x 2 x i8>,
  <vscale x 2 x half>,
  iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16(<vscale x 2 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i8_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vfncvt.xu.f.w v9, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16(
    <vscale x 2 x i8> undef,
    <vscale x 2 x half> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16(
  <vscale x 2 x i8>,
  <vscale x 2 x half>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16(<vscale x 2 x i8> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i8_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i8.nxv2f16(
    <vscale x 2 x i8> %0,
    <vscale x 2 x half> %1,
    <vscale x 2 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16(
  <vscale x 4 x i8>,
  <vscale x 4 x half>,
  iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16(<vscale x 4 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i8_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vfncvt.xu.f.w v9, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16(
    <vscale x 4 x i8> undef,
    <vscale x 4 x half> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16(
  <vscale x 4 x i8>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16(<vscale x 4 x i8> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i8_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i8.nxv4f16(
    <vscale x 4 x i8> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16(
  <vscale x 8 x i8>,
  <vscale x 8 x half>,
  iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16(<vscale x 8 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i8_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vfncvt.xu.f.w v10, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16(
    <vscale x 8 x i8> undef,
    <vscale x 8 x half> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16(
  <vscale x 8 x i8>,
  <vscale x 8 x half>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16(<vscale x 8 x i8> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i8_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16(
    <vscale x 8 x i8> %0,
    <vscale x 8 x half> %1,
    <vscale x 8 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16(
  <vscale x 16 x i8>,
  <vscale x 16 x half>,
  iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16(<vscale x 16 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i8_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vfncvt.xu.f.w v12, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16(
    <vscale x 16 x i8> undef,
    <vscale x 16 x half> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16(
  <vscale x 16 x i8>,
  <vscale x 16 x half>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16(<vscale x 16 x i8> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i8_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16(
    <vscale x 16 x i8> %0,
    <vscale x 16 x half> %1,
    <vscale x 16 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16(
  <vscale x 32 x i8>,
  <vscale x 32 x half>,
  iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16(<vscale x 32 x half> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv32i8_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vfncvt.xu.f.w v16, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16(
    <vscale x 32 x i8> undef,
    <vscale x 32 x half> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16(
  <vscale x 32 x i8>,
  <vscale x 32 x half>,
  <vscale x 32 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 32 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16(<vscale x 32 x i8> %0, <vscale x 32 x half> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv32i8_nxv32f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16(
    <vscale x 32 x i8> %0,
    <vscale x 32 x half> %1,
    <vscale x 32 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32(
  <vscale x 1 x i16>,
  <vscale x 1 x float>,
  iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32(<vscale x 1 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i16_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.xu.f.w v9, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32(
    <vscale x 1 x i16> undef,
    <vscale x 1 x float> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32(
  <vscale x 1 x i16>,
  <vscale x 1 x float>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32(<vscale x 1 x i16> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i16_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i16.nxv1f32(
    <vscale x 1 x i16> %0,
    <vscale x 1 x float> %1,
    <vscale x 1 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32(
  <vscale x 2 x i16>,
  <vscale x 2 x float>,
  iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32(<vscale x 2 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i16_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.xu.f.w v9, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32(
    <vscale x 2 x i16> undef,
    <vscale x 2 x float> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32(
  <vscale x 2 x i16>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32(<vscale x 2 x i16> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i16_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i16.nxv2f32(
    <vscale x 2 x i16> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32(
  <vscale x 4 x i16>,
  <vscale x 4 x float>,
  iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32(<vscale x 4 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i16_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfncvt.xu.f.w v10, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32(
    <vscale x 4 x i16> undef,
    <vscale x 4 x float> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32(
  <vscale x 4 x i16>,
  <vscale x 4 x float>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32(<vscale x 4 x i16> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i16_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32(
    <vscale x 4 x i16> %0,
    <vscale x 4 x float> %1,
    <vscale x 4 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32(
  <vscale x 8 x i16>,
  <vscale x 8 x float>,
  iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32(<vscale x 8 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i16_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfncvt.xu.f.w v12, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32(
    <vscale x 8 x i16> undef,
    <vscale x 8 x float> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32(
  <vscale x 8 x i16>,
  <vscale x 8 x float>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32(<vscale x 8 x i16> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i16_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32(
    <vscale x 8 x i16> %0,
    <vscale x 8 x float> %1,
    <vscale x 8 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32(
  <vscale x 16 x i16>,
  <vscale x 16 x float>,
  iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32(<vscale x 16 x float> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv16i16_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfncvt.xu.f.w v16, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32(
    <vscale x 16 x i16> undef,
    <vscale x 16 x float> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32(
  <vscale x 16 x i16>,
  <vscale x 16 x float>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x i16> @intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32(<vscale x 16 x i16> %0, <vscale x 16 x float> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv16i16_nxv16f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32(
    <vscale x 16 x i16> %0,
    <vscale x 16 x float> %1,
    <vscale x 16 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64(
  <vscale x 1 x i32>,
  <vscale x 1 x double>,
  iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64(<vscale x 1 x double> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i32_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfncvt.xu.f.w v9, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64(
    <vscale x 1 x i32> undef,
    <vscale x 1 x double> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64(
  <vscale x 1 x i32>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64(<vscale x 1 x i32> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i32_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i32.nxv1f64(
    <vscale x 1 x i32> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64(
  <vscale x 2 x i32>,
  <vscale x 2 x double>,
  iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64(<vscale x 2 x double> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv2i32_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.xu.f.w v10, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64(
    <vscale x 2 x i32> undef,
    <vscale x 2 x double> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64(
  <vscale x 2 x i32>,
  <vscale x 2 x double>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64(<vscale x 2 x i32> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv2i32_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64(
    <vscale x 2 x i32> %0,
    <vscale x 2 x double> %1,
    <vscale x 2 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64(
  <vscale x 4 x i32>,
  <vscale x 4 x double>,
  iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64(<vscale x 4 x double> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv4i32_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfncvt.xu.f.w v12, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64(
    <vscale x 4 x i32> undef,
    <vscale x 4 x double> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64(
  <vscale x 4 x i32>,
  <vscale x 4 x double>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64(<vscale x 4 x i32> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv4i32_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64(
    <vscale x 4 x i32> %0,
    <vscale x 4 x double> %1,
    <vscale x 4 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64(
  <vscale x 8 x i32>,
  <vscale x 8 x double>,
  iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64(<vscale x 8 x double> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_xu.f.w_nxv8i32_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfncvt.xu.f.w v16, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64(
    <vscale x 8 x i32> undef,
    <vscale x 8 x double> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64(
  <vscale x 8 x i32>,
  <vscale x 8 x double>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x i32> @intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64(<vscale x 8 x i32> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv8i32_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64(
    <vscale x 8 x i32> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vfncvt_mask_xu.f.w_rtz_nxv8i32_nxv8f64(<vscale x 8 x i32> %0, <vscale x 8 x double> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_rtz_nxv8i32_nxv8f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64(
    <vscale x 8 x i32> %0,
    <vscale x 8 x double> %1,
    <vscale x 8 x i1> %2,
    iXLen 1, iXLen %3, iXLen 1)

  ret <vscale x 8 x i32> %a
}