// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
// RUN:   -target-feature +zvfh -disable-O0-optnone \
// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
#include <riscv_vector.h>
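// The functions below exercise the explicit-policy overloads of the
// round-towards-odd narrowing conversion (vfncvt.rod.f.f.w). The suffix
// selects the tail/mask policy, which shows up as the trailing policy
// operand of the masked IR intrinsic in the checks:
//   _tu   - unmasked, tail undisturbed (maskedoff supplies the passthru)
//   _tum  - masked, tail undisturbed, mask agnostic    (policy operand 2)
//   _tumu - masked, tail undisturbed, mask undisturbed (policy operand 0)
//   _mu   - masked, tail agnostic, mask undisturbed    (policy operand 1)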
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfncvt_rod_f_f_w_f16mf4_tu
// CHECK-RV64-SAME: (<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfncvt_rod_f_f_w_f16mf2_tu
// CHECK-RV64-SAME: (<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfncvt_rod_f_f_w_f16m1_tu
// CHECK-RV64-SAME: (<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tu(vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfncvt_rod_f_f_w_f16m2_tu
// CHECK-RV64-SAME: (<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tu(vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfncvt_rod_f_f_w_f16m4_tu
// CHECK-RV64-SAME: (<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x float> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tu(vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfncvt_rod_f_f_w_f32mf2_tu
// CHECK-RV64-SAME: (<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x double> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfncvt_rod_f_f_w_f32m1_tu
// CHECK-RV64-SAME: (<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x double> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tu(vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfncvt_rod_f_f_w_f32m2_tu
// CHECK-RV64-SAME: (<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x double> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tu(vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfncvt_rod_f_f_w_f32m4_tu
// CHECK-RV64-SAME: (<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x double> [[SRC]], i64 [[VL]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tu(vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tu(maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfncvt_rod_f_f_w_f16mf4_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tum(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfncvt_rod_f_f_w_f16mf2_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tum(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfncvt_rod_f_f_w_f16m1_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tum(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfncvt_rod_f_f_w_f16m2_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tum(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfncvt_rod_f_f_w_f16m4_tum
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x float> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tum(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfncvt_rod_f_f_w_f32mf2_tum
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x double> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tum(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfncvt_rod_f_f_w_f32m1_tum
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x double> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tum(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfncvt_rod_f_f_w_f32m2_tum
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x double> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tum(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfncvt_rod_f_f_w_f32m4_tum
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x double> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 2)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tum(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tum(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfncvt_rod_f_f_w_f16mf4_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_tumu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfncvt_rod_f_f_w_f16mf2_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_tumu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfncvt_rod_f_f_w_f16m1_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_tumu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfncvt_rod_f_f_w_f16m2_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_tumu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfncvt_rod_f_f_w_f16m4_tumu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x float> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_tumu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfncvt_rod_f_f_w_f32mf2_tumu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x double> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_tumu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfncvt_rod_f_f_w_f32m1_tumu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x double> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_tumu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfncvt_rod_f_f_w_f32m2_tumu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x double> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_tumu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfncvt_rod_f_f_w_f32m4_tumu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x double> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_tumu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_tumu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vfncvt_rod_f_f_w_f16mf4_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f16.nxv1f32.i64(<vscale x 1 x half> [[MASKEDOFF]], <vscale x 1 x float> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4_mu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat32mf2_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vfncvt_rod_f_f_w_f16mf2_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f16.nxv2f32.i64(<vscale x 2 x half> [[MASKEDOFF]], <vscale x 2 x float> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2_mu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat32m1_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vfncvt_rod_f_f_w_f16m1_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f16.nxv4f32.i64(<vscale x 4 x half> [[MASKEDOFF]], <vscale x 4 x float> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1_mu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vfncvt_rod_f_f_w_f16m2_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f16.nxv8f32.i64(<vscale x 8 x half> [[MASKEDOFF]], <vscale x 8 x float> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2_mu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vfncvt_rod_f_f_w_f16m4_mu
// CHECK-RV64-SAME: (<vscale x 16 x i1> [[MASK:%.*]], <vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv16f16.nxv16f32.i64(<vscale x 16 x half> [[MASKEDOFF]], <vscale x 16 x float> [[SRC]], <vscale x 16 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4_mu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vfncvt_rod_f_f_w_f32mf2_mu
// CHECK-RV64-SAME: (<vscale x 1 x i1> [[MASK:%.*]], <vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv1f32.nxv1f64.i64(<vscale x 1 x float> [[MASKEDOFF]], <vscale x 1 x double> [[SRC]], <vscale x 1 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2_mu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat64m1_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vfncvt_rod_f_f_w_f32m1_mu
// CHECK-RV64-SAME: (<vscale x 2 x i1> [[MASK:%.*]], <vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv2f32.nxv2f64.i64(<vscale x 2 x float> [[MASKEDOFF]], <vscale x 2 x double> [[SRC]], <vscale x 2 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1_mu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vfncvt_rod_f_f_w_f32m2_mu
// CHECK-RV64-SAME: (<vscale x 4 x i1> [[MASK:%.*]], <vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv4f32.nxv4f64.i64(<vscale x 4 x float> [[MASKEDOFF]], <vscale x 4 x double> [[SRC]], <vscale x 4 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2_mu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vfncvt_rod_f_f_w_f32m4_mu
// CHECK-RV64-SAME: (<vscale x 8 x i1> [[MASK:%.*]], <vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.mask.nxv8f32.nxv8f64.i64(<vscale x 8 x float> [[MASKEDOFF]], <vscale x 8 x double> [[SRC]], <vscale x 8 x i1> [[MASK]], i64 [[VL]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_mu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) {
  return __riscv_vfncvt_rod_f_mu(mask, maskedoff, src, vl);
}