// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
// RUN: -target-feature +zvfh -disable-O0-optnone \
// RUN: -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN: FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

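// The tests below exercise the tail-undisturbed (_tu) and masked
// tail-undisturbed (_tum) variants of the __riscv_vfwcvt widening
// conversions. In the lowered LLVM intrinsics, the `i64 7` operand on
// the float-to-integer conversions selects the dynamic rounding mode
// (FRM_DYN), and the trailing `i64 2` policy operand on the masked
// variants encodes tail-undisturbed/mask-agnostic.
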
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfwcvt_f_x_v_f16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x i8> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tu(vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfwcvt_f_x_v_f16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x i8> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tu(vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfwcvt_f_x_v_f16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x i8> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tu(vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfwcvt_f_x_v_f16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x i8> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tu(vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfwcvt_f_x_v_f16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x i8> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tu(vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfwcvt_f_x_v_f16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x i8> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tu(vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfwcvt_f_xu_v_f16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x i8> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tu(vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfwcvt_f_xu_v_f16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x i8> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tu(vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfwcvt_f_xu_v_f16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x i8> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tu(vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfwcvt_f_xu_v_f16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x i8> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tu(vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfwcvt_f_xu_v_f16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x i8> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tu(vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfwcvt_f_xu_v_f16m8_tu
// CHECK-RV64-SAME: (<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x i8> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tu(vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vfwcvt_x_f_v_i32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tu(vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
  return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vfwcvt_x_f_v_i32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfwcvt_x_f_v_i32m1_tu(vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vfwcvt_x_f_v_i32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfwcvt_x_f_v_i32m2_tu(vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
  return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vfwcvt_x_f_v_i32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfwcvt_x_f_v_i32m4_tu(vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
  return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vfwcvt_x_f_v_i32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfwcvt_x_f_v_i32m8_tu(vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
  return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vfwcvt_xu_f_v_u32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tu(vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vfwcvt_xu_f_v_u32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tu(vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vfwcvt_xu_f_v_u32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tu(vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vfwcvt_xu_f_v_u32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tu(vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vfwcvt_xu_f_v_u32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tu(vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_x_v_f32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x i16> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tu(vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_x_v_f32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x i16> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tu(vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_x_v_f32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x i16> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tu(vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_x_v_f32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x i16> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tu(vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_x_v_f32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x i16> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tu(vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_xu_v_f32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x i16> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tu(vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_xu_v_f32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x i16> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tu(vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_xu_v_f32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x i16> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tu(vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_xu_v_f32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x i16> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tu(vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_xu_v_f32m8_tu
// CHECK-RV64-SAME: (<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x i16> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tu(vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vfwcvt_x_f_v_i64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfwcvt_x_f_v_i64m1_tu(vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vfwcvt_x_f_v_i64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfwcvt_x_f_v_i64m2_tu(vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vfwcvt_x_f_v_i64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfwcvt_x_f_v_i64m4_tu(vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
  return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vfwcvt_x_f_v_i64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfwcvt_x_f_v_i64m8_tu(vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
  return __riscv_vfwcvt_x_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vfwcvt_xu_f_v_u64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tu(vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vfwcvt_xu_f_v_u64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tu(vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vfwcvt_xu_f_v_u64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tu(vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vfwcvt_xu_f_v_u64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], i64 7, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tu(vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfwcvt_f_x_v_f64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x i32> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tu(vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfwcvt_f_x_v_f64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x i32> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tu(vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfwcvt_f_x_v_f64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x i32> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tu(vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfwcvt_f_x_v_f64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x i32> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tu(vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfwcvt_f_xu_v_f64m1_tu
// CHECK-RV64-SAME: (<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x i32> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tu(vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfwcvt_f_xu_v_f64m2_tu
// CHECK-RV64-SAME: (<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x i32> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tu(vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfwcvt_f_xu_v_f64m4_tu
// CHECK-RV64-SAME: (<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x i32> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tu(vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfwcvt_f_xu_v_f64m8_tu
// CHECK-RV64-SAME: (<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x i32> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tu(vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tu(maskedoff, src, vl);
}

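// Masked variants: each _tum test below takes a vbool mask ahead of
// the maskedoff operand, and the lowered intrinsic gains a mask
// operand plus the trailing policy immediate.
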
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfwcvt_f_x_v_f16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x i8> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfwcvt_f_x_v_f16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x i8> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfwcvt_f_x_v_f16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x i8> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfwcvt_f_x_v_f16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x i8> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfwcvt_f_x_v_f16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x i8> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfwcvt_f_x_v_f16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x i8> [[SRC]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfwcvt_f_xu_v_f16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x i8> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfwcvt_f_xu_v_f16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x i8> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfwcvt_f_xu_v_f16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x i8> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfwcvt_f_xu_v_f16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x i8> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfwcvt_f_xu_v_f16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x i8> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfwcvt_f_xu_v_f16m8_tum
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x i8> [[SRC]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tum(vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vfwcvt_x_f_v_i32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vfwcvt_x_f_v_i32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfwcvt_x_f_v_i32m1_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vfwcvt_x_f_v_i32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfwcvt_x_f_v_i32m2_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vfwcvt_x_f_v_i32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfwcvt_x_f_v_i32m4_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vfwcvt_x_f_v_i32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfwcvt_x_f_v_i32m8_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vfwcvt_xu_f_v_u32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vfwcvt_xu_f_v_u32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vfwcvt_xu_f_v_u32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vfwcvt_xu_f_v_u32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vfwcvt_xu_f_v_u32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_x_v_f32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x i16> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_x_v_f32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x i16> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_x_v_f32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x i16> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_x_v_f32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x i16> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_x_v_f32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x i16> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_xu_v_f32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x i16> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_xu_v_f32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x i16> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_xu_v_f32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x i16> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_xu_v_f32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x i16> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_xu_v_f32m8_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x i16> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tum(vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vfwcvt_x_f_v_i64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfwcvt_x_f_v_i64m1_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vfwcvt_x_f_v_i64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfwcvt_x_f_v_i64m2_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, vl);
}

830 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vfwcvt_x_f_v_i64m4_tum
831 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
832 // CHECK-RV64-NEXT: entry:
833 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
834 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
836 vint64m4_t
test_vfwcvt_x_f_v_i64m4_tum(vbool16_t mask
, vint64m4_t maskedoff
, vfloat32m2_t src
, size_t vl
) {
837 return __riscv_vfwcvt_x_tum(mask
, maskedoff
, src
, vl
);
840 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vfwcvt_x_f_v_i64m8_tum
841 // CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
842 // CHECK-RV64-NEXT: entry:
843 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
844 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
846 vint64m8_t
test_vfwcvt_x_f_v_i64m8_tum(vbool8_t mask
, vint64m8_t maskedoff
, vfloat32m4_t src
, size_t vl
) {
847 return __riscv_vfwcvt_x_tum(mask
, maskedoff
, src
, vl
);
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vfwcvt_xu_f_v_u64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vfwcvt_xu_f_v_u64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vfwcvt_xu_f_v_u64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vfwcvt_xu_f_v_u64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfwcvt_f_x_v_f64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x i32> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfwcvt_f_x_v_f64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x i32> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfwcvt_f_x_v_f64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x i32> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfwcvt_f_x_v_f64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x i32> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfwcvt_f_xu_v_f64m1_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x i32> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tum(vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfwcvt_f_xu_v_f64m2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x i32> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tum(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfwcvt_f_xu_v_f64m4_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x i32> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tum(vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfwcvt_f_xu_v_f64m8_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x i32> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tum(vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tum(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfwcvt_f_x_v_f16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x i8> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfwcvt_f_x_v_f16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x i8> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfwcvt_f_x_v_f16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x i8> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfwcvt_f_x_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfwcvt_f_x_v_f16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x i8> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfwcvt_f_x_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfwcvt_f_x_v_f16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x i8> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfwcvt_f_x_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfwcvt_f_x_v_f16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x i8> [[SRC]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfwcvt_f_x_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfwcvt_f_xu_v_f16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x i8> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfwcvt_f_xu_v_f16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x i8> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfwcvt_f_xu_v_f16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x i8> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfwcvt_f_xu_v_f16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x i8> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfwcvt_f_xu_v_f16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x i8> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfwcvt_f_xu_v_f16m8_tumu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x i8> [[SRC]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_tumu(vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vfwcvt_x_f_v_i32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vfwcvt_x_f_v_i32mf2_tumu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vfwcvt_x_f_v_i32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vfwcvt_x_f_v_i32m1_tumu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vfwcvt_x_f_v_i32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vfwcvt_x_f_v_i32m2_tumu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vfwcvt_x_f_v_i32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vfwcvt_x_f_v_i32m4_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vfwcvt_x_f_v_i32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vfwcvt_x_f_v_i32m8_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vfwcvt_xu_f_v_u32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vfwcvt_xu_f_v_u32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfwcvt_xu_f_v_u32m1_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vfwcvt_xu_f_v_u32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfwcvt_xu_f_v_u32m2_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vfwcvt_xu_f_v_u32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfwcvt_xu_f_v_u32m4_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vfwcvt_xu_f_v_u32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfwcvt_xu_f_v_u32m8_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_x_v_f32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x i16> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_x_v_f32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x i16> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_x_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_x_v_f32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x i16> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_x_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_x_v_f32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x i16> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_x_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_x_v_f32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x i16> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_x_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_xu_v_f32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x i16> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_xu_v_f32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x i16> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_xu_v_f32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x i16> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_xu_v_f32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x i16> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_xu_v_f32m8_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x i16> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_tumu(vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vfwcvt_x_f_v_i64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vfwcvt_x_f_v_i64m1_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vfwcvt_x_f_v_i64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vfwcvt_x_f_v_i64m2_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vfwcvt_x_f_v_i64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vfwcvt_x_f_v_i64m4_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vfwcvt_x_f_v_i64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vfwcvt_x_f_v_i64m8_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vfwcvt_xu_f_v_u64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vfwcvt_xu_f_v_u64m1_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vfwcvt_xu_f_v_u64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vfwcvt_xu_f_v_u64m2_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vfwcvt_xu_f_v_u64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vfwcvt_xu_f_v_u64m4_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vfwcvt_xu_f_v_u64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vfwcvt_xu_f_v_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfwcvt_f_x_v_f64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x i32> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_x_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfwcvt_f_x_v_f64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x i32> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_x_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfwcvt_f_x_v_f64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x i32> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_x_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfwcvt_f_x_v_f64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x i32> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_x_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfwcvt_f_xu_v_f64m1_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x i32> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_tumu(vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfwcvt_f_xu_v_f64m2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x i32> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_tumu(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfwcvt_f_xu_v_f64m4_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x i32> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_tumu(vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfwcvt_f_xu_v_f64m8_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x i32> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_tumu(vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_tumu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfwcvt_f_x_v_f16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x i8> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfwcvt_f_x_v_f16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x i8> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfwcvt_f_x_v_f16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x i8> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfwcvt_f_x_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vint8mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfwcvt_f_x_v_f16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x i8> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfwcvt_f_x_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfwcvt_f_x_v_f16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x i8> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfwcvt_f_x_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfwcvt_f_x_v_f16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x i8> [[SRC]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfwcvt_f_x_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfwcvt_f_xu_v_f16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f16.nxv1i8.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x i8> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vuint8mf8_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfwcvt_f_xu_v_f16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f16.nxv2i8.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x i8> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vuint8mf4_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfwcvt_f_xu_v_f16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f16.nxv4i8.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x i8> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfwcvt_f_xu_v_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vuint8mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfwcvt_f_xu_v_f16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x i8> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfwcvt_f_xu_v_f16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x i8> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vfwcvt_f_xu_v_f16m8_mu
// CHECK-RV64-SAME: (<vscale x 32 x i1> [[MASK:%.*]], <vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8.i64(<vscale x 32 x half> [[MASKEDOFF]], <vscale x 32 x i8> [[SRC]], <vscale x 32 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_mu(vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vfwcvt_x_f_v_i32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
vint32mf2_t test_vfwcvt_x_f_v_i32mf2_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
  return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vfwcvt_x_f_v_i32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
vint32m1_t test_vfwcvt_x_f_v_i32m1_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vfwcvt_x_f_v_i32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
vint32m2_t test_vfwcvt_x_f_v_i32m2_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
  return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vfwcvt_x_f_v_i32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
vint32m4_t test_vfwcvt_x_f_v_i32m4_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
  return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vfwcvt_x_f_v_i32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
vint32m8_t test_vfwcvt_x_f_v_i32m8_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
  return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vfwcvt_xu_f_v_u32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vfwcvt_xu_f_v_u32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
vuint32m1_t test_vfwcvt_xu_f_v_u32m1_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vfwcvt_xu_f_v_u32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
vuint32m2_t test_vfwcvt_xu_f_v_u32m2_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
  return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vfwcvt_xu_f_v_u32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
vuint32m4_t test_vfwcvt_xu_f_v_u32m4_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vfwcvt_xu_f_v_u32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
vuint32m8_t test_vfwcvt_xu_f_v_u32m8_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_x_v_f32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x i16> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vint16mf4_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_x_v_f32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x i16> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
vfloat32m1_t test_vfwcvt_f_x_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vint16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_x_v_f32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x i16> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
vfloat32m2_t test_vfwcvt_f_x_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_x_v_f32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x i16> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
vfloat32m4_t test_vfwcvt_f_x_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_x_v_f32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x i16> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
vfloat32m8_t test_vfwcvt_f_x_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfwcvt_f_xu_v_f32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f32.nxv1i16.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x i16> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vuint16mf4_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfwcvt_f_xu_v_f32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f32.nxv2i16.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x i16> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
vfloat32m1_t test_vfwcvt_f_xu_v_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vuint16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfwcvt_f_xu_v_f32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x i16> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfwcvt_f_xu_v_f32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x i16> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vfwcvt_f_xu_v_f32m8_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64(<vscale x 16 x float> [[MASKEDOFF]], <vscale x 16 x i16> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_mu(vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vfwcvt_x_f_v_i64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
vint64m1_t test_vfwcvt_x_f_v_i64m1_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vfwcvt_x_f_v_i64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
vint64m2_t test_vfwcvt_x_f_v_i64m2_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vfwcvt_x_f_v_i64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
vint64m4_t test_vfwcvt_x_f_v_i64m4_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
  return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vfwcvt_x_f_v_i64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
vint64m8_t test_vfwcvt_x_f_v_i64m8_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
  return __riscv_vfwcvt_x_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vfwcvt_xu_f_v_u64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
vuint64m1_t test_vfwcvt_xu_f_v_u64m1_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vfwcvt_xu_f_v_u64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
vuint64m2_t test_vfwcvt_xu_f_v_u64m2_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vfwcvt_xu_f_v_u64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
vuint64m4_t test_vfwcvt_xu_f_v_u64m4_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vfwcvt_xu_f_v_u64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 7, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
vuint64m8_t test_vfwcvt_xu_f_v_u64m8_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfwcvt_f_x_v_f64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x i32> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
vfloat64m1_t test_vfwcvt_f_x_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vint32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfwcvt_f_x_v_f64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x i32> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
vfloat64m2_t test_vfwcvt_f_x_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfwcvt_f_x_v_f64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x i32> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
vfloat64m4_t test_vfwcvt_f_x_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfwcvt_f_x_v_f64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x i32> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
vfloat64m8_t test_vfwcvt_f_x_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vfwcvt_f_xu_v_f64m1_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv1f64.nxv1i32.i64(<vscale x 1 x double> [[MASKEDOFF]], <vscale x 1 x i32> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
vfloat64m1_t test_vfwcvt_f_xu_v_f64m1_mu(vbool64_t mask, vfloat64m1_t maskedoff, vuint32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vfwcvt_f_xu_v_f64m2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64(<vscale x 2 x double> [[MASKEDOFF]], <vscale x 2 x i32> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_mu(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vfwcvt_f_xu_v_f64m4_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64(<vscale x 4 x double> [[MASKEDOFF]], <vscale x 4 x i32> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_mu(vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vfwcvt_f_xu_v_f64m8_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64(<vscale x 8 x double> [[MASKEDOFF]], <vscale x 8 x i32> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_mu(vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) {
  return __riscv_vfwcvt_f_mu(mask, maskedoff, src, vl);
}

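// Editorial note (not autogenerated): the `_rm_tu` tests below pass an
// explicit rounding mode, `__RISCV_FRM_RNE`, which lowers to the constant
// `i64 0` operand in place of the dynamic `i64 7` used above; the
// tail-undisturbed behaviour comes from the unmasked intrinsic's `maskedoff`
// (passthru) operand, so no policy operand is emitted.
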
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vfwcvt_x_f_v_i32mf2_rm_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_tu(vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
  return __riscv_vfwcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vfwcvt_x_f_v_i32m1_rm_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_tu(vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vfwcvt_x_f_v_i32m2_rm_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_tu(vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
  return __riscv_vfwcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vfwcvt_x_f_v_i32m4_rm_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_tu(vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
  return __riscv_vfwcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vfwcvt_x_f_v_i32m8_rm_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_tu(vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
  return __riscv_vfwcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vfwcvt_xu_f_v_u32mf2_rm_tu
// CHECK-RV64-SAME: (<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_tu(vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vfwcvt_xu_f_v_u32m1_rm_tu
// CHECK-RV64-SAME: (<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_tu(vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vfwcvt_xu_f_v_u32m2_rm_tu
// CHECK-RV64-SAME: (<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_tu(vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vfwcvt_xu_f_v_u32m4_rm_tu
// CHECK-RV64-SAME: (<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_tu(vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vfwcvt_xu_f_v_u32m8_rm_tu
// CHECK-RV64-SAME: (<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_tu(vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vfwcvt_x_f_v_i64m1_rm_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_tu(vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vfwcvt_x_f_v_i64m2_rm_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_tu(vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return __riscv_vfwcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vfwcvt_x_f_v_i64m4_rm_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_tu(vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
  return __riscv_vfwcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vfwcvt_x_f_v_i64m8_rm_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_tu(vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
  return __riscv_vfwcvt_x_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vfwcvt_xu_f_v_u64m1_rm_tu
// CHECK-RV64-SAME: (<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_tu(vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vfwcvt_xu_f_v_u64m2_rm_tu
// CHECK-RV64-SAME: (<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_tu(vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vfwcvt_xu_f_v_u64m4_rm_tu
// CHECK-RV64-SAME: (<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_tu(vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vfwcvt_xu_f_v_u64m8_rm_tu
// CHECK-RV64-SAME: (<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], i64 0, i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_tu(vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tu(maskedoff, src, __RISCV_FRM_RNE, vl);
}

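// Editorial note (not autogenerated): the masked `_rm_tum` tests below keep
// the explicit `__RISCV_FRM_RNE` (`i64 0`) rounding mode and use policy
// operand `i64 2` (tail undisturbed, mask agnostic).
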
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vfwcvt_x_f_v_i32mf2_rm_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_tum(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vfwcvt_x_f_v_i32m1_rm_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_tum(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vfwcvt_x_f_v_i32m2_rm_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_tum(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vfwcvt_x_f_v_i32m4_rm_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_tum(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vfwcvt_x_f_v_i32m8_rm_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_tum(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vfwcvt_xu_f_v_u32mf2_rm_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_tum(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vfwcvt_xu_f_v_u32m1_rm_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_tum(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vfwcvt_xu_f_v_u32m2_rm_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_tum(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vfwcvt_xu_f_v_u32m4_rm_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_tum(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vfwcvt_xu_f_v_u32m8_rm_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_tum(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vfwcvt_x_f_v_i64m1_rm_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_tum(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vfwcvt_x_f_v_i64m2_rm_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_tum(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vfwcvt_x_f_v_i64m4_rm_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_tum(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vfwcvt_x_f_v_i64m8_rm_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_tum(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
  return __riscv_vfwcvt_x_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vfwcvt_xu_f_v_u64m1_rm_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_tum(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vfwcvt_xu_f_v_u64m2_rm_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_tum(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vfwcvt_xu_f_v_u64m4_rm_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_tum(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vfwcvt_xu_f_v_u64m8_rm_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_tum(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tum(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

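// Editorial note (not autogenerated): the masked `_rm_tumu` tests below use
// policy operand `i64 0`, i.e. both tail and mask lanes are left undisturbed.
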
2291 // CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vfwcvt_x_f_v_i32mf2_rm_tumu
2292 // CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2293 // CHECK-RV64-NEXT: entry:
2294 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
2295 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
2297 vint32mf2_t
test_vfwcvt_x_f_v_i32mf2_rm_tumu(vbool64_t mask
, vint32mf2_t maskedoff
, vfloat16mf4_t src
, size_t vl
) {
2298 return __riscv_vfwcvt_x_tumu(mask
, maskedoff
, src
, __RISCV_FRM_RNE
, vl
);
2301 // CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vfwcvt_x_f_v_i32m1_rm_tumu
2302 // CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2303 // CHECK-RV64-NEXT: entry:
2304 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
2305 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
2307 vint32m1_t
test_vfwcvt_x_f_v_i32m1_rm_tumu(vbool32_t mask
, vint32m1_t maskedoff
, vfloat16mf2_t src
, size_t vl
) {
2308 return __riscv_vfwcvt_x_tumu(mask
, maskedoff
, src
, __RISCV_FRM_RNE
, vl
);
2311 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vfwcvt_x_f_v_i32m2_rm_tumu
2312 // CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
2313 // CHECK-RV64-NEXT: entry:
2314 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
2315 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
2317 vint32m2_t
test_vfwcvt_x_f_v_i32m2_rm_tumu(vbool16_t mask
, vint32m2_t maskedoff
, vfloat16m1_t src
, size_t vl
) {
2318 return __riscv_vfwcvt_x_tumu(mask
, maskedoff
, src
, __RISCV_FRM_RNE
, vl
);
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vfwcvt_x_f_v_i32m4_rm_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_tumu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vfwcvt_x_f_v_i32m8_rm_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_tumu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vfwcvt_xu_f_v_u32mf2_rm_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_tumu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vfwcvt_xu_f_v_u32m1_rm_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_tumu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vfwcvt_xu_f_v_u32m2_rm_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_tumu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vfwcvt_xu_f_v_u32m4_rm_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_tumu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vfwcvt_xu_f_v_u32m8_rm_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_tumu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vfwcvt_x_f_v_i64m1_rm_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_tumu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vfwcvt_x_f_v_i64m2_rm_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_tumu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vfwcvt_x_f_v_i64m4_rm_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_tumu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vfwcvt_x_f_v_i64m8_rm_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_tumu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
  return __riscv_vfwcvt_x_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vfwcvt_xu_f_v_u64m1_rm_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_tumu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vfwcvt_xu_f_v_u64m2_rm_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_tumu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vfwcvt_xu_f_v_u64m4_rm_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_tumu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vfwcvt_xu_f_v_u64m8_rm_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_tumu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_tumu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

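// Note (not autogenerated): the _rm_mu tests below use the tail-agnostic,
// mask-undisturbed policy; the checked IR carries this as a trailing `i64 1`
// immediate, with the rounding-mode operand still __RISCV_FRM_RNE (`i64 0`).
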
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vfwcvt_x_f_v_i32mf2_rm_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
vint32mf2_t test_vfwcvt_x_f_v_i32mf2_rm_mu(vbool64_t mask, vint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
  return __riscv_vfwcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vfwcvt_x_f_v_i32m1_rm_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
vint32m1_t test_vfwcvt_x_f_v_i32m1_rm_mu(vbool32_t mask, vint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vfwcvt_x_f_v_i32m2_rm_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
vint32m2_t test_vfwcvt_x_f_v_i32m2_rm_mu(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
  return __riscv_vfwcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vfwcvt_x_f_v_i32m4_rm_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
vint32m4_t test_vfwcvt_x_f_v_i32m4_rm_mu(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
  return __riscv_vfwcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vfwcvt_x_f_v_i32m8_rm_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
vint32m8_t test_vfwcvt_x_f_v_i32m8_rm_mu(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
  return __riscv_vfwcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vfwcvt_xu_f_v_u32mf2_rm_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> [[MASKEDOFF]], <vscale x 1 x half> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2_rm_mu(vbool64_t mask, vuint32mf2_t maskedoff, vfloat16mf4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vfwcvt_xu_f_v_u32m1_rm_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> [[MASKEDOFF]], <vscale x 2 x half> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
vuint32m1_t test_vfwcvt_xu_f_v_u32m1_rm_mu(vbool32_t mask, vuint32m1_t maskedoff, vfloat16mf2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vfwcvt_xu_f_v_u32m2_rm_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> [[MASKEDOFF]], <vscale x 4 x half> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
vuint32m2_t test_vfwcvt_xu_f_v_u32m2_rm_mu(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) {
  return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vfwcvt_xu_f_v_u32m4_rm_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> [[MASKEDOFF]], <vscale x 8 x half> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
vuint32m4_t test_vfwcvt_xu_f_v_u32m4_rm_mu(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vfwcvt_xu_f_v_u32m8_rm_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> [[MASKEDOFF]], <vscale x 16 x half> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
vuint32m8_t test_vfwcvt_xu_f_v_u32m8_rm_mu(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vfwcvt_x_f_v_i64m1_rm_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
vint64m1_t test_vfwcvt_x_f_v_i64m1_rm_mu(vbool64_t mask, vint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vfwcvt_x_f_v_i64m2_rm_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
vint64m2_t test_vfwcvt_x_f_v_i64m2_rm_mu(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return __riscv_vfwcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vfwcvt_x_f_v_i64m4_rm_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
vint64m4_t test_vfwcvt_x_f_v_i64m4_rm_mu(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
  return __riscv_vfwcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vfwcvt_x_f_v_i64m8_rm_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
vint64m8_t test_vfwcvt_x_f_v_i64m8_rm_mu(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
  return __riscv_vfwcvt_x_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vfwcvt_xu_f_v_u64m1_rm_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
vuint64m1_t test_vfwcvt_xu_f_v_u64m1_rm_mu(vbool64_t mask, vuint64m1_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vfwcvt_xu_f_v_u64m2_rm_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
vuint64m2_t test_vfwcvt_xu_f_v_u64m2_rm_mu(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vfwcvt_xu_f_v_u64m4_rm_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
vuint64m4_t test_vfwcvt_xu_f_v_u64m4_rm_mu(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) {
  return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}

// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vfwcvt_xu_f_v_u64m8_rm_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 0, i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
vuint64m8_t test_vfwcvt_xu_f_v_u64m8_rm_mu(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) {
  return __riscv_vfwcvt_xu_mu(mask, maskedoff, src, __RISCV_FRM_RNE, vl);
}
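
// A minimal usage sketch (not part of the autogenerated checks), assuming the
// same overloaded intrinsics exercised above: widen the active f32 lanes of
// `src` to u64 with explicit round-to-nearest-even; under the _mu policy,
// inactive lanes take their values from `dest` while tail lanes are agnostic.
// The helper name `widen_active_u64` is illustrative only.
//
//   vuint64m8_t widen_active_u64(vbool8_t mask, vuint64m8_t dest,
//                                vfloat32m4_t src, size_t vl) {
//     return __riscv_vfwcvt_xu_mu(mask, dest, src, __RISCV_FRM_RNE, vl);
//   }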