; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
  <vscale x 1 x half>,
  <vscale x 1 x i32>,
  iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32(<vscale x 1 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
    <vscale x 1 x half> undef,
    <vscale x 1 x i32> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 1 x half> %a
}
declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32(
  <vscale x 1 x half>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfncvt.f.xu.w v8, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f16.nxv1i32(
    <vscale x 1 x half> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 1 x half> %a
}
declare <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32(
  <vscale x 2 x half>,
  <vscale x 2 x i32>,
  iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32(<vscale x 2 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32(
    <vscale x 2 x half> undef,
    <vscale x 2 x i32> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 2 x half> %a
}
declare <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32(
  <vscale x 2 x half>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32(<vscale x 2 x half> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f16_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vfncvt.f.xu.w v8, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f16.nxv2i32(
    <vscale x 2 x half> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 2 x half> %a
}
declare <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32(
  <vscale x 4 x half>,
  <vscale x 4 x i32>,
  iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32(<vscale x 4 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v10, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32(
    <vscale x 4 x half> undef,
    <vscale x 4 x i32> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 4 x half> %a
}
declare <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32(
  <vscale x 4 x half>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32(<vscale x 4 x half> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f16_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vfncvt.f.xu.w v8, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32(
    <vscale x 4 x half> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 4 x half> %a
}
declare <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32(
  <vscale x 8 x half>,
  <vscale x 8 x i32>,
  iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32(<vscale x 8 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v12, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32(
    <vscale x 8 x half> undef,
    <vscale x 8 x i32> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 8 x half> %a
}
declare <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32(
  <vscale x 8 x half>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32(<vscale x 8 x half> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f16_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vfncvt.f.xu.w v8, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32(
    <vscale x 8 x half> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 8 x half> %a
}
declare <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32(
  <vscale x 16 x half>,
  <vscale x 16 x i32>,
  iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32(<vscale x 16 x i32> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv16f16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v16, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32(
    <vscale x 16 x half> undef,
    <vscale x 16 x i32> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 16 x half> %a
}
declare <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32(
  <vscale x 16 x half>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 16 x half> @intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32(<vscale x 16 x half> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv16f16_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vfncvt.f.xu.w v8, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32(
    <vscale x 16 x half> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 16 x half> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64(
  <vscale x 1 x float>,
  <vscale x 1 x i64>,
  iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64(<vscale x 1 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v9, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64(
    <vscale x 1 x float> undef,
    <vscale x 1 x i64> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64(
  <vscale x 1 x float>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 1 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv1f32_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vfncvt.f.xu.w v8, v9, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv1f32.nxv1i64(
    <vscale x 1 x float> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64(
  <vscale x 2 x float>,
  <vscale x 2 x i64>,
  iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64(<vscale x 2 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv2f32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v10, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64(
    <vscale x 2 x float> undef,
    <vscale x 2 x i64> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64(
  <vscale x 2 x float>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 2 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv2f32_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vfncvt.f.xu.w v8, v10, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64(
    <vscale x 2 x float> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64(
  <vscale x 4 x float>,
  <vscale x 4 x i64>,
  iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64(<vscale x 4 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv4f32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v12, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64(
    <vscale x 4 x float> undef,
    <vscale x 4 x i64> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64(
  <vscale x 4 x float>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 4 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv4f32_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vfncvt.f.xu.w v8, v12, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64(
    <vscale x 4 x float> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64(
  <vscale x 8 x float>,
  <vscale x 8 x i64>,
  iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64(<vscale x 8 x i64> %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_f.xu.w_nxv8f32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vfncvt.f.xu.w v16, v8
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64(
    <vscale x 8 x float> undef,
    <vscale x 8 x i64> %0,
    iXLen 0, iXLen %1)

  ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64(
  <vscale x 8 x float>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  iXLen, iXLen, iXLen);

define <vscale x 8 x float> @intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.xu.w_nxv8f32_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fsrmi a1, 0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vfncvt.f.xu.w v8, v16, v0.t
; CHECK-NEXT:    fsrm a1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64(
    <vscale x 8 x float> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i1> %2,
    iXLen 0, iXLen %3, iXLen 1)

  ret <vscale x 8 x float> %a
}