1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
2 // REQUIRES: riscv-registered-target
3 // RUN: %clang_cc1 -triple riscv32 -target-feature +v -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
4 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
6 #include <sifive_vector.h>
/* Instruction-encoding immediates for the SiFive custom-coprocessor (XSfvcp)
 * intrinsics: bits [27:26], [24:20] and [11:7] of the sf.vc.* encodings.
 * p27_26 = 0b11 (= 3) matches the leading `i32 3` / `i64 3` constant in every
 * CHECK line below; the two 5-bit fields are all-ones (= 31). */
#define p27_26 (0b11)
#define p24_20 (0b11111)
#define p11_7 (0b11111)
13 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u8mf8(
14 // CHECK-RV32-NEXT: entry:
15 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.x.se.e8mf8.i32.i8.i32(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
16 // CHECK-RV32-NEXT: ret void
18 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u8mf8(
19 // CHECK-RV64-NEXT: entry:
20 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.x.se.e8mf8.i64.i8.i64(i64 3, i64 31, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
21 // CHECK-RV64-NEXT: ret void
23 void test_sf_vc_x_se_u8mf8(uint8_t rs1
, size_t vl
) {
24 __riscv_sf_vc_x_se_u8mf8(p27_26
, p24_20
, p11_7
, rs1
, vl
);
27 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u8mf4(
28 // CHECK-RV32-NEXT: entry:
29 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.x.se.e8mf4.i32.i8.i32(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
30 // CHECK-RV32-NEXT: ret void
32 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u8mf4(
33 // CHECK-RV64-NEXT: entry:
34 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.x.se.e8mf4.i64.i8.i64(i64 3, i64 31, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
35 // CHECK-RV64-NEXT: ret void
37 void test_sf_vc_x_se_u8mf4(uint8_t rs1
, size_t vl
) {
38 __riscv_sf_vc_x_se_u8mf4(p27_26
, p24_20
, p11_7
, rs1
, vl
);
41 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u8mf2(
42 // CHECK-RV32-NEXT: entry:
43 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.x.se.e8mf2.i32.i8.i32(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
44 // CHECK-RV32-NEXT: ret void
46 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u8mf2(
47 // CHECK-RV64-NEXT: entry:
48 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.x.se.e8mf2.i64.i8.i64(i64 3, i64 31, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
49 // CHECK-RV64-NEXT: ret void
51 void test_sf_vc_x_se_u8mf2(uint8_t rs1
, size_t vl
) {
52 __riscv_sf_vc_x_se_u8mf2(p27_26
, p24_20
, p11_7
, rs1
, vl
);
55 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u8m1(
56 // CHECK-RV32-NEXT: entry:
57 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.x.se.e8m1.i32.i8.i32(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
58 // CHECK-RV32-NEXT: ret void
60 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u8m1(
61 // CHECK-RV64-NEXT: entry:
62 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.x.se.e8m1.i64.i8.i64(i64 3, i64 31, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
63 // CHECK-RV64-NEXT: ret void
65 void test_sf_vc_x_se_u8m1(uint8_t rs1
, size_t vl
) {
66 __riscv_sf_vc_x_se_u8m1(p27_26
, p24_20
, p11_7
, rs1
, vl
);
69 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u8m2(
70 // CHECK-RV32-NEXT: entry:
71 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.x.se.e8m2.i32.i8.i32(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
72 // CHECK-RV32-NEXT: ret void
74 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u8m2(
75 // CHECK-RV64-NEXT: entry:
76 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.x.se.e8m2.i64.i8.i64(i64 3, i64 31, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
77 // CHECK-RV64-NEXT: ret void
79 void test_sf_vc_x_se_u8m2(uint8_t rs1
, size_t vl
) {
80 __riscv_sf_vc_x_se_u8m2(p27_26
, p24_20
, p11_7
, rs1
, vl
);
83 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u8m4(
84 // CHECK-RV32-NEXT: entry:
85 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.x.se.e8m4.i32.i8.i32(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
86 // CHECK-RV32-NEXT: ret void
88 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u8m4(
89 // CHECK-RV64-NEXT: entry:
90 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.x.se.e8m4.i64.i8.i64(i64 3, i64 31, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
91 // CHECK-RV64-NEXT: ret void
93 void test_sf_vc_x_se_u8m4(uint8_t rs1
, size_t vl
) {
94 __riscv_sf_vc_x_se_u8m4(p27_26
, p24_20
, p11_7
, rs1
, vl
);
97 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u8m8(
98 // CHECK-RV32-NEXT: entry:
99 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.x.se.e8m8.i32.i8.i32(i32 3, i32 31, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
100 // CHECK-RV32-NEXT: ret void
102 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u8m8(
103 // CHECK-RV64-NEXT: entry:
104 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.x.se.e8m8.i64.i8.i64(i64 3, i64 31, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
105 // CHECK-RV64-NEXT: ret void
107 void test_sf_vc_x_se_u8m8(uint8_t rs1
, size_t vl
) {
108 __riscv_sf_vc_x_se_u8m8(p27_26
, p24_20
, p11_7
, rs1
, vl
);
111 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u16mf4(
112 // CHECK-RV32-NEXT: entry:
113 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.x.se.e16mf4.i32.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
114 // CHECK-RV32-NEXT: ret void
116 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u16mf4(
117 // CHECK-RV64-NEXT: entry:
118 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.x.se.e16mf4.i64.i16.i64(i64 3, i64 31, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
119 // CHECK-RV64-NEXT: ret void
121 void test_sf_vc_x_se_u16mf4(uint16_t rs1
, size_t vl
) {
122 __riscv_sf_vc_x_se_u16mf4(p27_26
, p24_20
, p11_7
, rs1
, vl
);
125 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u16mf2(
126 // CHECK-RV32-NEXT: entry:
127 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.x.se.e16mf2.i32.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
128 // CHECK-RV32-NEXT: ret void
130 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u16mf2(
131 // CHECK-RV64-NEXT: entry:
132 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.x.se.e16mf2.i64.i16.i64(i64 3, i64 31, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
133 // CHECK-RV64-NEXT: ret void
135 void test_sf_vc_x_se_u16mf2(uint16_t rs1
, size_t vl
) {
136 __riscv_sf_vc_x_se_u16mf2(p27_26
, p24_20
, p11_7
, rs1
, vl
);
139 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u16m1(
140 // CHECK-RV32-NEXT: entry:
141 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.x.se.e16m1.i32.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
142 // CHECK-RV32-NEXT: ret void
144 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u16m1(
145 // CHECK-RV64-NEXT: entry:
146 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.x.se.e16m1.i64.i16.i64(i64 3, i64 31, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
147 // CHECK-RV64-NEXT: ret void
149 void test_sf_vc_x_se_u16m1(uint16_t rs1
, size_t vl
) {
150 __riscv_sf_vc_x_se_u16m1(p27_26
, p24_20
, p11_7
, rs1
, vl
);
153 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u16m2(
154 // CHECK-RV32-NEXT: entry:
155 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.x.se.e16m2.i32.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
156 // CHECK-RV32-NEXT: ret void
158 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u16m2(
159 // CHECK-RV64-NEXT: entry:
160 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.x.se.e16m2.i64.i16.i64(i64 3, i64 31, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
161 // CHECK-RV64-NEXT: ret void
163 void test_sf_vc_x_se_u16m2(uint16_t rs1
, size_t vl
) {
164 __riscv_sf_vc_x_se_u16m2(p27_26
, p24_20
, p11_7
, rs1
, vl
);
167 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u16m4(
168 // CHECK-RV32-NEXT: entry:
169 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.x.se.e16m4.i32.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
170 // CHECK-RV32-NEXT: ret void
172 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u16m4(
173 // CHECK-RV64-NEXT: entry:
174 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.x.se.e16m4.i64.i16.i64(i64 3, i64 31, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
175 // CHECK-RV64-NEXT: ret void
177 void test_sf_vc_x_se_u16m4(uint16_t rs1
, size_t vl
) {
178 __riscv_sf_vc_x_se_u16m4(p27_26
, p24_20
, p11_7
, rs1
, vl
);
181 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u16m8(
182 // CHECK-RV32-NEXT: entry:
183 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.x.se.e16m8.i32.i16.i32(i32 3, i32 31, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
184 // CHECK-RV32-NEXT: ret void
186 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u16m8(
187 // CHECK-RV64-NEXT: entry:
188 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.x.se.e16m8.i64.i16.i64(i64 3, i64 31, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
189 // CHECK-RV64-NEXT: ret void
191 void test_sf_vc_x_se_u16m8(uint16_t rs1
, size_t vl
) {
192 __riscv_sf_vc_x_se_u16m8(p27_26
, p24_20
, p11_7
, rs1
, vl
);
195 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u32mf2(
196 // CHECK-RV32-NEXT: entry:
197 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.x.se.e32mf2.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
198 // CHECK-RV32-NEXT: ret void
200 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u32mf2(
201 // CHECK-RV64-NEXT: entry:
202 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.x.se.e32mf2.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
203 // CHECK-RV64-NEXT: ret void
205 void test_sf_vc_x_se_u32mf2(uint32_t rs1
, size_t vl
) {
206 __riscv_sf_vc_x_se_u32mf2(p27_26
, p24_20
, p11_7
, rs1
, vl
);
209 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u32m1(
210 // CHECK-RV32-NEXT: entry:
211 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.x.se.e32m1.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
212 // CHECK-RV32-NEXT: ret void
214 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u32m1(
215 // CHECK-RV64-NEXT: entry:
216 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.x.se.e32m1.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
217 // CHECK-RV64-NEXT: ret void
219 void test_sf_vc_x_se_u32m1(uint32_t rs1
, size_t vl
) {
220 __riscv_sf_vc_x_se_u32m1(p27_26
, p24_20
, p11_7
, rs1
, vl
);
223 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u32m2(
224 // CHECK-RV32-NEXT: entry:
225 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.x.se.e32m2.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
226 // CHECK-RV32-NEXT: ret void
228 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u32m2(
229 // CHECK-RV64-NEXT: entry:
230 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.x.se.e32m2.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
231 // CHECK-RV64-NEXT: ret void
233 void test_sf_vc_x_se_u32m2(uint32_t rs1
, size_t vl
) {
234 __riscv_sf_vc_x_se_u32m2(p27_26
, p24_20
, p11_7
, rs1
, vl
);
237 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u32m4(
238 // CHECK-RV32-NEXT: entry:
239 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.x.se.e32m4.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
240 // CHECK-RV32-NEXT: ret void
242 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u32m4(
243 // CHECK-RV64-NEXT: entry:
244 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.x.se.e32m4.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
245 // CHECK-RV64-NEXT: ret void
247 void test_sf_vc_x_se_u32m4(uint32_t rs1
, size_t vl
) {
248 __riscv_sf_vc_x_se_u32m4(p27_26
, p24_20
, p11_7
, rs1
, vl
);
251 // CHECK-RV32-LABEL: @test_sf_vc_x_se_u32m8(
252 // CHECK-RV32-NEXT: entry:
253 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.x.se.e32m8.i32.i32.i32(i32 3, i32 31, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
254 // CHECK-RV32-NEXT: ret void
256 // CHECK-RV64-LABEL: @test_sf_vc_x_se_u32m8(
257 // CHECK-RV64-NEXT: entry:
258 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.x.se.e32m8.i64.i32.i64(i64 3, i64 31, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
259 // CHECK-RV64-NEXT: ret void
261 void test_sf_vc_x_se_u32m8(uint32_t rs1
, size_t vl
) {
262 __riscv_sf_vc_x_se_u32m8(p27_26
, p24_20
, p11_7
, rs1
, vl
);
265 // CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u8mf8(
266 // CHECK-RV32-NEXT: entry:
267 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.se.nxv1i8.i32.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
268 // CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
270 // CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u8mf8(
271 // CHECK-RV64-NEXT: entry:
272 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.se.nxv1i8.i64.i8.i64(i64 3, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
273 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
275 vuint8mf8_t
test_sf_vc_v_x_se_u8mf8(uint8_t rs1
, size_t vl
) {
276 return __riscv_sf_vc_v_x_se_u8mf8(p27_26
, p24_20
, rs1
, vl
);
279 // CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u8mf4(
280 // CHECK-RV32-NEXT: entry:
281 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.x.se.nxv2i8.i32.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
282 // CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
284 // CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u8mf4(
285 // CHECK-RV64-NEXT: entry:
286 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.x.se.nxv2i8.i64.i8.i64(i64 3, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
287 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
289 vuint8mf4_t
test_sf_vc_v_x_se_u8mf4(uint8_t rs1
, size_t vl
) {
290 return __riscv_sf_vc_v_x_se_u8mf4(p27_26
, p24_20
, rs1
, vl
);
293 // CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u8mf2(
294 // CHECK-RV32-NEXT: entry:
295 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.x.se.nxv4i8.i32.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
296 // CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
298 // CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u8mf2(
299 // CHECK-RV64-NEXT: entry:
300 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.x.se.nxv4i8.i64.i8.i64(i64 3, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
301 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
303 vuint8mf2_t
test_sf_vc_v_x_se_u8mf2(uint8_t rs1
, size_t vl
) {
304 return __riscv_sf_vc_v_x_se_u8mf2(p27_26
, p24_20
, rs1
, vl
);
307 // CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u8m1(
308 // CHECK-RV32-NEXT: entry:
309 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.x.se.nxv8i8.i32.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
310 // CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
312 // CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u8m1(
313 // CHECK-RV64-NEXT: entry:
314 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.x.se.nxv8i8.i64.i8.i64(i64 3, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
315 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
317 vuint8m1_t
test_sf_vc_v_x_se_u8m1(uint8_t rs1
, size_t vl
) {
318 return __riscv_sf_vc_v_x_se_u8m1(p27_26
, p24_20
, rs1
, vl
);
321 // CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u8m2(
322 // CHECK-RV32-NEXT: entry:
323 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.x.se.nxv16i8.i32.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
324 // CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
326 // CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u8m2(
327 // CHECK-RV64-NEXT: entry:
328 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.x.se.nxv16i8.i64.i8.i64(i64 3, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
329 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
331 vuint8m2_t
test_sf_vc_v_x_se_u8m2(uint8_t rs1
, size_t vl
) {
332 return __riscv_sf_vc_v_x_se_u8m2(p27_26
, p24_20
, rs1
, vl
);
335 // CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u8m4(
336 // CHECK-RV32-NEXT: entry:
337 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.x.se.nxv32i8.i32.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
338 // CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
340 // CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u8m4(
341 // CHECK-RV64-NEXT: entry:
342 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.x.se.nxv32i8.i64.i8.i64(i64 3, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
343 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
345 vuint8m4_t
test_sf_vc_v_x_se_u8m4(uint8_t rs1
, size_t vl
) {
346 return __riscv_sf_vc_v_x_se_u8m4(p27_26
, p24_20
, rs1
, vl
);
349 // CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u8m8(
350 // CHECK-RV32-NEXT: entry:
351 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.x.se.nxv64i8.i32.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
352 // CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
354 // CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u8m8(
355 // CHECK-RV64-NEXT: entry:
356 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.x.se.nxv64i8.i64.i8.i64(i64 3, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
357 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
359 vuint8m8_t
test_sf_vc_v_x_se_u8m8(uint8_t rs1
, size_t vl
) {
360 return __riscv_sf_vc_v_x_se_u8m8(p27_26
, p24_20
, rs1
, vl
);
363 // CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u16mf4(
364 // CHECK-RV32-NEXT: entry:
365 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.x.se.nxv1i16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
366 // CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
368 // CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u16mf4(
369 // CHECK-RV64-NEXT: entry:
370 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.x.se.nxv1i16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
371 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
373 vuint16mf4_t
test_sf_vc_v_x_se_u16mf4(uint16_t rs1
, size_t vl
) {
374 return __riscv_sf_vc_v_x_se_u16mf4(p27_26
, p24_20
, rs1
, vl
);
377 // CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u16mf2(
378 // CHECK-RV32-NEXT: entry:
379 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.x.se.nxv2i16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
380 // CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
382 // CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u16mf2(
383 // CHECK-RV64-NEXT: entry:
384 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.x.se.nxv2i16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
385 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
387 vuint16mf2_t
test_sf_vc_v_x_se_u16mf2(uint16_t rs1
, size_t vl
) {
388 return __riscv_sf_vc_v_x_se_u16mf2(p27_26
, p24_20
, rs1
, vl
);
391 // CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u16m1(
392 // CHECK-RV32-NEXT: entry:
393 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.x.se.nxv4i16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
394 // CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
396 // CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u16m1(
397 // CHECK-RV64-NEXT: entry:
398 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.x.se.nxv4i16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
399 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
401 vuint16m1_t
test_sf_vc_v_x_se_u16m1(uint16_t rs1
, size_t vl
) {
402 return __riscv_sf_vc_v_x_se_u16m1(p27_26
, p24_20
, rs1
, vl
);
405 // CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u16m2(
406 // CHECK-RV32-NEXT: entry:
407 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.x.se.nxv8i16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
408 // CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
410 // CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u16m2(
411 // CHECK-RV64-NEXT: entry:
412 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.x.se.nxv8i16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
413 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
415 vuint16m2_t
test_sf_vc_v_x_se_u16m2(uint16_t rs1
, size_t vl
) {
416 return __riscv_sf_vc_v_x_se_u16m2(p27_26
, p24_20
, rs1
, vl
);
419 // CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u16m4(
420 // CHECK-RV32-NEXT: entry:
421 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.x.se.nxv16i16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
422 // CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
424 // CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u16m4(
425 // CHECK-RV64-NEXT: entry:
426 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.x.se.nxv16i16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
427 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
429 vuint16m4_t
test_sf_vc_v_x_se_u16m4(uint16_t rs1
, size_t vl
) {
430 return __riscv_sf_vc_v_x_se_u16m4(p27_26
, p24_20
, rs1
, vl
);
433 // CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u16m8(
434 // CHECK-RV32-NEXT: entry:
435 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.x.se.nxv32i16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
436 // CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
438 // CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u16m8(
439 // CHECK-RV64-NEXT: entry:
440 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.x.se.nxv32i16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
441 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
443 vuint16m8_t
test_sf_vc_v_x_se_u16m8(uint16_t rs1
, size_t vl
) {
444 return __riscv_sf_vc_v_x_se_u16m8(p27_26
, p24_20
, rs1
, vl
);
447 // CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u32mf2(
448 // CHECK-RV32-NEXT: entry:
449 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.x.se.nxv1i32.i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
450 // CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
452 // CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u32mf2(
453 // CHECK-RV64-NEXT: entry:
454 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.x.se.nxv1i32.i64.i32.i64(i64 3, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
455 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
457 vuint32mf2_t
test_sf_vc_v_x_se_u32mf2(uint32_t rs1
, size_t vl
) {
458 return __riscv_sf_vc_v_x_se_u32mf2(p27_26
, p24_20
, rs1
, vl
);
461 // CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u32m1(
462 // CHECK-RV32-NEXT: entry:
463 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.x.se.nxv2i32.i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
464 // CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
466 // CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u32m1(
467 // CHECK-RV64-NEXT: entry:
468 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.x.se.nxv2i32.i64.i32.i64(i64 3, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
469 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
471 vuint32m1_t
test_sf_vc_v_x_se_u32m1(uint32_t rs1
, size_t vl
) {
472 return __riscv_sf_vc_v_x_se_u32m1(p27_26
, p24_20
, rs1
, vl
);
475 // CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u32m2(
476 // CHECK-RV32-NEXT: entry:
477 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.x.se.nxv4i32.i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
478 // CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
480 // CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u32m2(
481 // CHECK-RV64-NEXT: entry:
482 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.x.se.nxv4i32.i64.i32.i64(i64 3, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
483 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
485 vuint32m2_t
test_sf_vc_v_x_se_u32m2(uint32_t rs1
, size_t vl
) {
486 return __riscv_sf_vc_v_x_se_u32m2(p27_26
, p24_20
, rs1
, vl
);
489 // CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u32m4(
490 // CHECK-RV32-NEXT: entry:
491 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.x.se.nxv8i32.i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
492 // CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
494 // CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u32m4(
495 // CHECK-RV64-NEXT: entry:
496 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.x.se.nxv8i32.i64.i32.i64(i64 3, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
497 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
499 vuint32m4_t
test_sf_vc_v_x_se_u32m4(uint32_t rs1
, size_t vl
) {
500 return __riscv_sf_vc_v_x_se_u32m4(p27_26
, p24_20
, rs1
, vl
);
503 // CHECK-RV32-LABEL: @test_sf_vc_v_x_se_u32m8(
504 // CHECK-RV32-NEXT: entry:
505 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.x.se.nxv16i32.i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
506 // CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
508 // CHECK-RV64-LABEL: @test_sf_vc_v_x_se_u32m8(
509 // CHECK-RV64-NEXT: entry:
510 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.x.se.nxv16i32.i64.i32.i64(i64 3, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
511 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
513 vuint32m8_t
test_sf_vc_v_x_se_u32m8(uint32_t rs1
, size_t vl
) {
514 return __riscv_sf_vc_v_x_se_u32m8(p27_26
, p24_20
, rs1
, vl
);
517 // CHECK-RV32-LABEL: @test_sf_vc_v_x_u8mf8(
518 // CHECK-RV32-NEXT: entry:
519 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.nxv1i8.i32.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
520 // CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
522 // CHECK-RV64-LABEL: @test_sf_vc_v_x_u8mf8(
523 // CHECK-RV64-NEXT: entry:
524 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.x.nxv1i8.i64.i8.i64(i64 3, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
525 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
527 vuint8mf8_t
test_sf_vc_v_x_u8mf8(uint8_t rs1
, size_t vl
) {
528 return __riscv_sf_vc_v_x_u8mf8(p27_26
, p24_20
, rs1
, vl
);
531 // CHECK-RV32-LABEL: @test_sf_vc_v_x_u8mf4(
532 // CHECK-RV32-NEXT: entry:
533 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.x.nxv2i8.i32.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
534 // CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
536 // CHECK-RV64-LABEL: @test_sf_vc_v_x_u8mf4(
537 // CHECK-RV64-NEXT: entry:
538 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.x.nxv2i8.i64.i8.i64(i64 3, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
539 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
541 vuint8mf4_t
test_sf_vc_v_x_u8mf4(uint8_t rs1
, size_t vl
) {
542 return __riscv_sf_vc_v_x_u8mf4(p27_26
, p24_20
, rs1
, vl
);
545 // CHECK-RV32-LABEL: @test_sf_vc_v_x_u8mf2(
546 // CHECK-RV32-NEXT: entry:
547 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.x.nxv4i8.i32.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
548 // CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
550 // CHECK-RV64-LABEL: @test_sf_vc_v_x_u8mf2(
551 // CHECK-RV64-NEXT: entry:
552 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.x.nxv4i8.i64.i8.i64(i64 3, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
553 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
555 vuint8mf2_t
test_sf_vc_v_x_u8mf2(uint8_t rs1
, size_t vl
) {
556 return __riscv_sf_vc_v_x_u8mf2(p27_26
, p24_20
, rs1
, vl
);
559 // CHECK-RV32-LABEL: @test_sf_vc_v_x_u8m1(
560 // CHECK-RV32-NEXT: entry:
561 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.x.nxv8i8.i32.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
562 // CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
564 // CHECK-RV64-LABEL: @test_sf_vc_v_x_u8m1(
565 // CHECK-RV64-NEXT: entry:
566 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.x.nxv8i8.i64.i8.i64(i64 3, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
567 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
569 vuint8m1_t
test_sf_vc_v_x_u8m1(uint8_t rs1
, size_t vl
) {
570 return __riscv_sf_vc_v_x_u8m1(p27_26
, p24_20
, rs1
, vl
);
573 // CHECK-RV32-LABEL: @test_sf_vc_v_x_u8m2(
574 // CHECK-RV32-NEXT: entry:
575 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.x.nxv16i8.i32.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
576 // CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
578 // CHECK-RV64-LABEL: @test_sf_vc_v_x_u8m2(
579 // CHECK-RV64-NEXT: entry:
580 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.x.nxv16i8.i64.i8.i64(i64 3, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
581 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
583 vuint8m2_t
test_sf_vc_v_x_u8m2(uint8_t rs1
, size_t vl
) {
584 return __riscv_sf_vc_v_x_u8m2(p27_26
, p24_20
, rs1
, vl
);
587 // CHECK-RV32-LABEL: @test_sf_vc_v_x_u8m4(
588 // CHECK-RV32-NEXT: entry:
589 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.x.nxv32i8.i32.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
590 // CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
592 // CHECK-RV64-LABEL: @test_sf_vc_v_x_u8m4(
593 // CHECK-RV64-NEXT: entry:
594 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.x.nxv32i8.i64.i8.i64(i64 3, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
595 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
597 vuint8m4_t
test_sf_vc_v_x_u8m4(uint8_t rs1
, size_t vl
) {
598 return __riscv_sf_vc_v_x_u8m4(p27_26
, p24_20
, rs1
, vl
);
601 // CHECK-RV32-LABEL: @test_sf_vc_v_x_u8m8(
602 // CHECK-RV32-NEXT: entry:
603 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.x.nxv64i8.i32.i8.i32(i32 3, i32 31, i8 [[RS1:%.*]], i32 [[VL:%.*]])
604 // CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
606 // CHECK-RV64-LABEL: @test_sf_vc_v_x_u8m8(
607 // CHECK-RV64-NEXT: entry:
608 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.x.nxv64i8.i64.i8.i64(i64 3, i64 31, i8 [[RS1:%.*]], i64 [[VL:%.*]])
609 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
611 vuint8m8_t
test_sf_vc_v_x_u8m8(uint8_t rs1
, size_t vl
) {
612 return __riscv_sf_vc_v_x_u8m8(p27_26
, p24_20
, rs1
, vl
);
615 // CHECK-RV32-LABEL: @test_sf_vc_v_x_u16mf4(
616 // CHECK-RV32-NEXT: entry:
617 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.x.nxv1i16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
618 // CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
620 // CHECK-RV64-LABEL: @test_sf_vc_v_x_u16mf4(
621 // CHECK-RV64-NEXT: entry:
622 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.x.nxv1i16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
623 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
625 vuint16mf4_t
test_sf_vc_v_x_u16mf4(uint16_t rs1
, size_t vl
) {
626 return __riscv_sf_vc_v_x_u16mf4(p27_26
, p24_20
, rs1
, vl
);
629 // CHECK-RV32-LABEL: @test_sf_vc_v_x_u16mf2(
630 // CHECK-RV32-NEXT: entry:
631 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.x.nxv2i16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
632 // CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
634 // CHECK-RV64-LABEL: @test_sf_vc_v_x_u16mf2(
635 // CHECK-RV64-NEXT: entry:
636 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.x.nxv2i16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
637 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
639 vuint16mf2_t
test_sf_vc_v_x_u16mf2(uint16_t rs1
, size_t vl
) {
640 return __riscv_sf_vc_v_x_u16mf2(p27_26
, p24_20
, rs1
, vl
);
643 // CHECK-RV32-LABEL: @test_sf_vc_v_x_u16m1(
644 // CHECK-RV32-NEXT: entry:
645 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.x.nxv4i16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
646 // CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
648 // CHECK-RV64-LABEL: @test_sf_vc_v_x_u16m1(
649 // CHECK-RV64-NEXT: entry:
650 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.x.nxv4i16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
651 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
653 vuint16m1_t
test_sf_vc_v_x_u16m1(uint16_t rs1
, size_t vl
) {
654 return __riscv_sf_vc_v_x_u16m1(p27_26
, p24_20
, rs1
, vl
);
657 // CHECK-RV32-LABEL: @test_sf_vc_v_x_u16m2(
658 // CHECK-RV32-NEXT: entry:
659 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.x.nxv8i16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
660 // CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
662 // CHECK-RV64-LABEL: @test_sf_vc_v_x_u16m2(
663 // CHECK-RV64-NEXT: entry:
664 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.x.nxv8i16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
665 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
667 vuint16m2_t
test_sf_vc_v_x_u16m2(uint16_t rs1
, size_t vl
) {
668 return __riscv_sf_vc_v_x_u16m2(p27_26
, p24_20
, rs1
, vl
);
671 // CHECK-RV32-LABEL: @test_sf_vc_v_x_u16m4(
672 // CHECK-RV32-NEXT: entry:
673 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.x.nxv16i16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
674 // CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
676 // CHECK-RV64-LABEL: @test_sf_vc_v_x_u16m4(
677 // CHECK-RV64-NEXT: entry:
678 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.x.nxv16i16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
679 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
681 vuint16m4_t
test_sf_vc_v_x_u16m4(uint16_t rs1
, size_t vl
) {
682 return __riscv_sf_vc_v_x_u16m4(p27_26
, p24_20
, rs1
, vl
);
685 // CHECK-RV32-LABEL: @test_sf_vc_v_x_u16m8(
686 // CHECK-RV32-NEXT: entry:
687 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.x.nxv32i16.i32.i16.i32(i32 3, i32 31, i16 [[RS1:%.*]], i32 [[VL:%.*]])
688 // CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
690 // CHECK-RV64-LABEL: @test_sf_vc_v_x_u16m8(
691 // CHECK-RV64-NEXT: entry:
692 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.x.nxv32i16.i64.i16.i64(i64 3, i64 31, i16 [[RS1:%.*]], i64 [[VL:%.*]])
693 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
695 vuint16m8_t
test_sf_vc_v_x_u16m8(uint16_t rs1
, size_t vl
) {
696 return __riscv_sf_vc_v_x_u16m8(p27_26
, p24_20
, rs1
, vl
);
699 // CHECK-RV32-LABEL: @test_sf_vc_v_x_u32mf2(
700 // CHECK-RV32-NEXT: entry:
701 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.x.nxv1i32.i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
702 // CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
704 // CHECK-RV64-LABEL: @test_sf_vc_v_x_u32mf2(
705 // CHECK-RV64-NEXT: entry:
706 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.x.nxv1i32.i64.i32.i64(i64 3, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
707 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
709 vuint32mf2_t
test_sf_vc_v_x_u32mf2(uint32_t rs1
, size_t vl
) {
710 return __riscv_sf_vc_v_x_u32mf2(p27_26
, p24_20
, rs1
, vl
);
713 // CHECK-RV32-LABEL: @test_sf_vc_v_x_u32m1(
714 // CHECK-RV32-NEXT: entry:
715 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.x.nxv2i32.i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
716 // CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
718 // CHECK-RV64-LABEL: @test_sf_vc_v_x_u32m1(
719 // CHECK-RV64-NEXT: entry:
720 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.x.nxv2i32.i64.i32.i64(i64 3, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
721 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
723 vuint32m1_t
test_sf_vc_v_x_u32m1(uint32_t rs1
, size_t vl
) {
724 return __riscv_sf_vc_v_x_u32m1(p27_26
, p24_20
, rs1
, vl
);
727 // CHECK-RV32-LABEL: @test_sf_vc_v_x_u32m2(
728 // CHECK-RV32-NEXT: entry:
729 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.x.nxv4i32.i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
730 // CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
732 // CHECK-RV64-LABEL: @test_sf_vc_v_x_u32m2(
733 // CHECK-RV64-NEXT: entry:
734 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.x.nxv4i32.i64.i32.i64(i64 3, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
735 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
737 vuint32m2_t
test_sf_vc_v_x_u32m2(uint32_t rs1
, size_t vl
) {
738 return __riscv_sf_vc_v_x_u32m2(p27_26
, p24_20
, rs1
, vl
);
741 // CHECK-RV32-LABEL: @test_sf_vc_v_x_u32m4(
742 // CHECK-RV32-NEXT: entry:
743 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.x.nxv8i32.i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
744 // CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
746 // CHECK-RV64-LABEL: @test_sf_vc_v_x_u32m4(
747 // CHECK-RV64-NEXT: entry:
748 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.x.nxv8i32.i64.i32.i64(i64 3, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
749 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
751 vuint32m4_t
test_sf_vc_v_x_u32m4(uint32_t rs1
, size_t vl
) {
752 return __riscv_sf_vc_v_x_u32m4(p27_26
, p24_20
, rs1
, vl
);
755 // CHECK-RV32-LABEL: @test_sf_vc_v_x_u32m8(
756 // CHECK-RV32-NEXT: entry:
757 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.x.nxv16i32.i32.i32.i32(i32 3, i32 31, i32 [[RS1:%.*]], i32 [[VL:%.*]])
758 // CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
760 // CHECK-RV64-LABEL: @test_sf_vc_v_x_u32m8(
761 // CHECK-RV64-NEXT: entry:
762 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.x.nxv16i32.i64.i32.i64(i64 3, i64 31, i32 [[RS1:%.*]], i64 [[VL:%.*]])
763 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
765 vuint32m8_t
test_sf_vc_v_x_u32m8(uint32_t rs1
, size_t vl
) {
766 return __riscv_sf_vc_v_x_u32m8(p27_26
, p24_20
, rs1
, vl
);
769 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u8mf8(
770 // CHECK-RV32-NEXT: entry:
771 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.i.se.e8mf8.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
772 // CHECK-RV32-NEXT: ret void
774 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u8mf8(
775 // CHECK-RV64-NEXT: entry:
776 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.i.se.e8mf8.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
777 // CHECK-RV64-NEXT: ret void
779 void test_sf_vc_i_se_u8mf8(size_t vl
) {
780 __riscv_sf_vc_i_se_u8mf8(p27_26
, p24_20
, p11_7
, simm5
, vl
);
783 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u8mf4(
784 // CHECK-RV32-NEXT: entry:
785 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.i.se.e8mf4.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
786 // CHECK-RV32-NEXT: ret void
788 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u8mf4(
789 // CHECK-RV64-NEXT: entry:
790 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.i.se.e8mf4.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
791 // CHECK-RV64-NEXT: ret void
793 void test_sf_vc_i_se_u8mf4(size_t vl
) {
794 __riscv_sf_vc_i_se_u8mf4(p27_26
, p24_20
, p11_7
, simm5
, vl
);
797 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u8mf2(
798 // CHECK-RV32-NEXT: entry:
799 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.i.se.e8mf2.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
800 // CHECK-RV32-NEXT: ret void
802 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u8mf2(
803 // CHECK-RV64-NEXT: entry:
804 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.i.se.e8mf2.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
805 // CHECK-RV64-NEXT: ret void
807 void test_sf_vc_i_se_u8mf2(size_t vl
) {
808 __riscv_sf_vc_i_se_u8mf2(p27_26
, p24_20
, p11_7
, simm5
, vl
);
811 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u8m1(
812 // CHECK-RV32-NEXT: entry:
813 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.i.se.e8m1.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
814 // CHECK-RV32-NEXT: ret void
816 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u8m1(
817 // CHECK-RV64-NEXT: entry:
818 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.i.se.e8m1.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
819 // CHECK-RV64-NEXT: ret void
821 void test_sf_vc_i_se_u8m1(size_t vl
) {
822 __riscv_sf_vc_i_se_u8m1(p27_26
, p24_20
, p11_7
, simm5
, vl
);
825 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u8m2(
826 // CHECK-RV32-NEXT: entry:
827 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.i.se.e8m2.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
828 // CHECK-RV32-NEXT: ret void
830 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u8m2(
831 // CHECK-RV64-NEXT: entry:
832 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.i.se.e8m2.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
833 // CHECK-RV64-NEXT: ret void
835 void test_sf_vc_i_se_u8m2(size_t vl
) {
836 __riscv_sf_vc_i_se_u8m2(p27_26
, p24_20
, p11_7
, simm5
, vl
);
839 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u8m4(
840 // CHECK-RV32-NEXT: entry:
841 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.i.se.e8m4.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
842 // CHECK-RV32-NEXT: ret void
844 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u8m4(
845 // CHECK-RV64-NEXT: entry:
846 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.i.se.e8m4.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
847 // CHECK-RV64-NEXT: ret void
849 void test_sf_vc_i_se_u8m4(size_t vl
) {
850 __riscv_sf_vc_i_se_u8m4(p27_26
, p24_20
, p11_7
, simm5
, vl
);
853 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u8m8(
854 // CHECK-RV32-NEXT: entry:
855 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.i.se.e8m8.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
856 // CHECK-RV32-NEXT: ret void
858 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u8m8(
859 // CHECK-RV64-NEXT: entry:
860 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.i.se.e8m8.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
861 // CHECK-RV64-NEXT: ret void
863 void test_sf_vc_i_se_u8m8(size_t vl
) {
864 __riscv_sf_vc_i_se_u8m8(p27_26
, p24_20
, p11_7
, simm5
, vl
);
867 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u16mf4(
868 // CHECK-RV32-NEXT: entry:
869 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.i.se.e16mf4.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
870 // CHECK-RV32-NEXT: ret void
872 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u16mf4(
873 // CHECK-RV64-NEXT: entry:
874 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.i.se.e16mf4.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
875 // CHECK-RV64-NEXT: ret void
877 void test_sf_vc_i_se_u16mf4(size_t vl
) {
878 __riscv_sf_vc_i_se_u16mf4(p27_26
, p24_20
, p11_7
, simm5
, vl
);
881 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u16mf2(
882 // CHECK-RV32-NEXT: entry:
883 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.i.se.e16mf2.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
884 // CHECK-RV32-NEXT: ret void
886 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u16mf2(
887 // CHECK-RV64-NEXT: entry:
888 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.i.se.e16mf2.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
889 // CHECK-RV64-NEXT: ret void
891 void test_sf_vc_i_se_u16mf2(size_t vl
) {
892 __riscv_sf_vc_i_se_u16mf2(p27_26
, p24_20
, p11_7
, simm5
, vl
);
895 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u16m1(
896 // CHECK-RV32-NEXT: entry:
897 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.i.se.e16m1.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
898 // CHECK-RV32-NEXT: ret void
900 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u16m1(
901 // CHECK-RV64-NEXT: entry:
902 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.i.se.e16m1.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
903 // CHECK-RV64-NEXT: ret void
905 void test_sf_vc_i_se_u16m1(size_t vl
) {
906 __riscv_sf_vc_i_se_u16m1(p27_26
, p24_20
, p11_7
, simm5
, vl
);
909 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u16m2(
910 // CHECK-RV32-NEXT: entry:
911 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.i.se.e16m2.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
912 // CHECK-RV32-NEXT: ret void
914 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u16m2(
915 // CHECK-RV64-NEXT: entry:
916 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.i.se.e16m2.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
917 // CHECK-RV64-NEXT: ret void
919 void test_sf_vc_i_se_u16m2(size_t vl
) {
920 __riscv_sf_vc_i_se_u16m2(p27_26
, p24_20
, p11_7
, simm5
, vl
);
923 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u16m4(
924 // CHECK-RV32-NEXT: entry:
925 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.i.se.e16m4.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
926 // CHECK-RV32-NEXT: ret void
928 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u16m4(
929 // CHECK-RV64-NEXT: entry:
930 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.i.se.e16m4.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
931 // CHECK-RV64-NEXT: ret void
933 void test_sf_vc_i_se_u16m4(size_t vl
) {
934 __riscv_sf_vc_i_se_u16m4(p27_26
, p24_20
, p11_7
, simm5
, vl
);
937 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u16m8(
938 // CHECK-RV32-NEXT: entry:
939 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.i.se.e16m8.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
940 // CHECK-RV32-NEXT: ret void
942 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u16m8(
943 // CHECK-RV64-NEXT: entry:
944 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.i.se.e16m8.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
945 // CHECK-RV64-NEXT: ret void
947 void test_sf_vc_i_se_u16m8(size_t vl
) {
948 __riscv_sf_vc_i_se_u16m8(p27_26
, p24_20
, p11_7
, simm5
, vl
);
951 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u32mf2(
952 // CHECK-RV32-NEXT: entry:
953 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.i.se.e32mf2.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
954 // CHECK-RV32-NEXT: ret void
956 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u32mf2(
957 // CHECK-RV64-NEXT: entry:
958 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.i.se.e32mf2.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
959 // CHECK-RV64-NEXT: ret void
961 void test_sf_vc_i_se_u32mf2(size_t vl
) {
962 __riscv_sf_vc_i_se_u32mf2(p27_26
, p24_20
, p11_7
, simm5
, vl
);
965 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u32m1(
966 // CHECK-RV32-NEXT: entry:
967 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.i.se.e32m1.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
968 // CHECK-RV32-NEXT: ret void
970 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u32m1(
971 // CHECK-RV64-NEXT: entry:
972 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.i.se.e32m1.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
973 // CHECK-RV64-NEXT: ret void
975 void test_sf_vc_i_se_u32m1(size_t vl
) {
976 __riscv_sf_vc_i_se_u32m1(p27_26
, p24_20
, p11_7
, simm5
, vl
);
979 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u32m2(
980 // CHECK-RV32-NEXT: entry:
981 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.i.se.e32m2.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
982 // CHECK-RV32-NEXT: ret void
984 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u32m2(
985 // CHECK-RV64-NEXT: entry:
986 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.i.se.e32m2.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
987 // CHECK-RV64-NEXT: ret void
989 void test_sf_vc_i_se_u32m2(size_t vl
) {
990 __riscv_sf_vc_i_se_u32m2(p27_26
, p24_20
, p11_7
, simm5
, vl
);
993 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u32m4(
994 // CHECK-RV32-NEXT: entry:
995 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.i.se.e32m4.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
996 // CHECK-RV32-NEXT: ret void
998 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u32m4(
999 // CHECK-RV64-NEXT: entry:
1000 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.i.se.e32m4.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
1001 // CHECK-RV64-NEXT: ret void
1003 void test_sf_vc_i_se_u32m4(size_t vl
) {
1004 __riscv_sf_vc_i_se_u32m4(p27_26
, p24_20
, p11_7
, simm5
, vl
);
1007 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u32m8(
1008 // CHECK-RV32-NEXT: entry:
1009 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.i.se.e32m8.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
1010 // CHECK-RV32-NEXT: ret void
1012 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u32m8(
1013 // CHECK-RV64-NEXT: entry:
1014 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.i.se.e32m8.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
1015 // CHECK-RV64-NEXT: ret void
1017 void test_sf_vc_i_se_u32m8(size_t vl
) {
1018 __riscv_sf_vc_i_se_u32m8(p27_26
, p24_20
, p11_7
, simm5
, vl
);
1021 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u64m1(
1022 // CHECK-RV32-NEXT: entry:
1023 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.i.se.e64m1.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
1024 // CHECK-RV32-NEXT: ret void
1026 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u64m1(
1027 // CHECK-RV64-NEXT: entry:
1028 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.i.se.e64m1.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
1029 // CHECK-RV64-NEXT: ret void
1031 void test_sf_vc_i_se_u64m1(size_t vl
) {
1032 __riscv_sf_vc_i_se_u64m1(p27_26
, p24_20
, p11_7
, simm5
, vl
);
1035 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u64m2(
1036 // CHECK-RV32-NEXT: entry:
1037 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.i.se.e64m2.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
1038 // CHECK-RV32-NEXT: ret void
1040 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u64m2(
1041 // CHECK-RV64-NEXT: entry:
1042 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.i.se.e64m2.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
1043 // CHECK-RV64-NEXT: ret void
1045 void test_sf_vc_i_se_u64m2(size_t vl
) {
1046 __riscv_sf_vc_i_se_u64m2(p27_26
, p24_20
, p11_7
, simm5
, vl
);
1049 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u64m4(
1050 // CHECK-RV32-NEXT: entry:
1051 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.i.se.e64m4.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
1052 // CHECK-RV32-NEXT: ret void
1054 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u64m4(
1055 // CHECK-RV64-NEXT: entry:
1056 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.i.se.e64m4.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
1057 // CHECK-RV64-NEXT: ret void
1059 void test_sf_vc_i_se_u64m4(size_t vl
) {
1060 __riscv_sf_vc_i_se_u64m4(p27_26
, p24_20
, p11_7
, simm5
, vl
);
1063 // CHECK-RV32-LABEL: @test_sf_vc_i_se_u64m8(
1064 // CHECK-RV32-NEXT: entry:
1065 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.i.se.e64m8.i32.i32.i32(i32 3, i32 31, i32 31, i32 10, i32 [[VL:%.*]])
1066 // CHECK-RV32-NEXT: ret void
1068 // CHECK-RV64-LABEL: @test_sf_vc_i_se_u64m8(
1069 // CHECK-RV64-NEXT: entry:
1070 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.i.se.e64m8.i64.i64.i64(i64 3, i64 31, i64 31, i64 10, i64 [[VL:%.*]])
1071 // CHECK-RV64-NEXT: ret void
1073 void test_sf_vc_i_se_u64m8(size_t vl
) {
1074 __riscv_sf_vc_i_se_u64m8(p27_26
, p24_20
, p11_7
, simm5
, vl
);
1077 // CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u8mf8(
1078 // CHECK-RV32-NEXT: entry:
1079 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.i.se.nxv1i8.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1080 // CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
1082 // CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u8mf8(
1083 // CHECK-RV64-NEXT: entry:
1084 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.i.se.nxv1i8.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1085 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
1087 vuint8mf8_t
test_sf_vc_v_i_se_u8mf8(size_t vl
) {
1088 return __riscv_sf_vc_v_i_se_u8mf8(p27_26
, p24_20
, simm5
, vl
);
1091 // CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u8mf4(
1092 // CHECK-RV32-NEXT: entry:
1093 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.i.se.nxv2i8.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1094 // CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
1096 // CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u8mf4(
1097 // CHECK-RV64-NEXT: entry:
1098 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.i.se.nxv2i8.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1099 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
1101 vuint8mf4_t
test_sf_vc_v_i_se_u8mf4(size_t vl
) {
1102 return __riscv_sf_vc_v_i_se_u8mf4(p27_26
, p24_20
, simm5
, vl
);
1105 // CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u8mf2(
1106 // CHECK-RV32-NEXT: entry:
1107 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.i.se.nxv4i8.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1108 // CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
1110 // CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u8mf2(
1111 // CHECK-RV64-NEXT: entry:
1112 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.i.se.nxv4i8.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1113 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
1115 vuint8mf2_t
test_sf_vc_v_i_se_u8mf2(size_t vl
) {
1116 return __riscv_sf_vc_v_i_se_u8mf2(p27_26
, p24_20
, simm5
, vl
);
1119 // CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u8m1(
1120 // CHECK-RV32-NEXT: entry:
1121 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.i.se.nxv8i8.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1122 // CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
1124 // CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u8m1(
1125 // CHECK-RV64-NEXT: entry:
1126 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.i.se.nxv8i8.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1127 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
1129 vuint8m1_t
test_sf_vc_v_i_se_u8m1(size_t vl
) {
1130 return __riscv_sf_vc_v_i_se_u8m1(p27_26
, p24_20
, simm5
, vl
);
1133 // CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u8m2(
1134 // CHECK-RV32-NEXT: entry:
1135 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.i.se.nxv16i8.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1136 // CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
1138 // CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u8m2(
1139 // CHECK-RV64-NEXT: entry:
1140 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.i.se.nxv16i8.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1141 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
1143 vuint8m2_t
test_sf_vc_v_i_se_u8m2(size_t vl
) {
1144 return __riscv_sf_vc_v_i_se_u8m2(p27_26
, p24_20
, simm5
, vl
);
1147 // CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u8m4(
1148 // CHECK-RV32-NEXT: entry:
1149 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.i.se.nxv32i8.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1150 // CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
1152 // CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u8m4(
1153 // CHECK-RV64-NEXT: entry:
1154 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.i.se.nxv32i8.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1155 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
1157 vuint8m4_t
test_sf_vc_v_i_se_u8m4(size_t vl
) {
1158 return __riscv_sf_vc_v_i_se_u8m4(p27_26
, p24_20
, simm5
, vl
);
1161 // CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u8m8(
1162 // CHECK-RV32-NEXT: entry:
1163 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.i.se.nxv64i8.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1164 // CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
1166 // CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u8m8(
1167 // CHECK-RV64-NEXT: entry:
1168 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.i.se.nxv64i8.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1169 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
1171 vuint8m8_t
test_sf_vc_v_i_se_u8m8(size_t vl
) {
1172 return __riscv_sf_vc_v_i_se_u8m8(p27_26
, p24_20
, simm5
, vl
);
1175 // CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u16mf4(
1176 // CHECK-RV32-NEXT: entry:
1177 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.i.se.nxv1i16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1178 // CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1180 // CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u16mf4(
1181 // CHECK-RV64-NEXT: entry:
1182 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.i.se.nxv1i16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1183 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1185 vuint16mf4_t
test_sf_vc_v_i_se_u16mf4(size_t vl
) {
1186 return __riscv_sf_vc_v_i_se_u16mf4(p27_26
, p24_20
, simm5
, vl
);
1189 // CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u16mf2(
1190 // CHECK-RV32-NEXT: entry:
1191 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.i.se.nxv2i16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1192 // CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1194 // CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u16mf2(
1195 // CHECK-RV64-NEXT: entry:
1196 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.i.se.nxv2i16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1197 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1199 vuint16mf2_t
test_sf_vc_v_i_se_u16mf2(size_t vl
) {
1200 return __riscv_sf_vc_v_i_se_u16mf2(p27_26
, p24_20
, simm5
, vl
);
1203 // CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u16m1(
1204 // CHECK-RV32-NEXT: entry:
1205 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.i.se.nxv4i16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1206 // CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1208 // CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u16m1(
1209 // CHECK-RV64-NEXT: entry:
1210 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.i.se.nxv4i16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1211 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1213 vuint16m1_t
test_sf_vc_v_i_se_u16m1(size_t vl
) {
1214 return __riscv_sf_vc_v_i_se_u16m1(p27_26
, p24_20
, simm5
, vl
);
1217 // CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u16m2(
1218 // CHECK-RV32-NEXT: entry:
1219 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.i.se.nxv8i16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1220 // CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1222 // CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u16m2(
1223 // CHECK-RV64-NEXT: entry:
1224 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.i.se.nxv8i16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1225 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1227 vuint16m2_t
test_sf_vc_v_i_se_u16m2(size_t vl
) {
1228 return __riscv_sf_vc_v_i_se_u16m2(p27_26
, p24_20
, simm5
, vl
);
1231 // CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u16m4(
1232 // CHECK-RV32-NEXT: entry:
1233 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.i.se.nxv16i16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1234 // CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1236 // CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u16m4(
1237 // CHECK-RV64-NEXT: entry:
1238 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.i.se.nxv16i16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1239 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1241 vuint16m4_t
test_sf_vc_v_i_se_u16m4(size_t vl
) {
1242 return __riscv_sf_vc_v_i_se_u16m4(p27_26
, p24_20
, simm5
, vl
);
1245 // CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u16m8(
1246 // CHECK-RV32-NEXT: entry:
1247 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.i.se.nxv32i16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1248 // CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1250 // CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u16m8(
1251 // CHECK-RV64-NEXT: entry:
1252 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.i.se.nxv32i16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1253 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1255 vuint16m8_t
test_sf_vc_v_i_se_u16m8(size_t vl
) {
1256 return __riscv_sf_vc_v_i_se_u16m8(p27_26
, p24_20
, simm5
, vl
);
1259 // CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u32mf2(
1260 // CHECK-RV32-NEXT: entry:
1261 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.i.se.nxv1i32.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1262 // CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1264 // CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u32mf2(
1265 // CHECK-RV64-NEXT: entry:
1266 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.i.se.nxv1i32.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1267 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1269 vuint32mf2_t
test_sf_vc_v_i_se_u32mf2(size_t vl
) {
1270 return __riscv_sf_vc_v_i_se_u32mf2(p27_26
, p24_20
, simm5
, vl
);
1273 // CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u32m1(
1274 // CHECK-RV32-NEXT: entry:
1275 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.i.se.nxv2i32.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1276 // CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1278 // CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u32m1(
1279 // CHECK-RV64-NEXT: entry:
1280 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.i.se.nxv2i32.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1281 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1283 vuint32m1_t
test_sf_vc_v_i_se_u32m1(size_t vl
) {
1284 return __riscv_sf_vc_v_i_se_u32m1(p27_26
, p24_20
, simm5
, vl
);
1287 // CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u32m2(
1288 // CHECK-RV32-NEXT: entry:
1289 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.i.se.nxv4i32.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1290 // CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1292 // CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u32m2(
1293 // CHECK-RV64-NEXT: entry:
1294 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.i.se.nxv4i32.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1295 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1297 vuint32m2_t
test_sf_vc_v_i_se_u32m2(size_t vl
) {
1298 return __riscv_sf_vc_v_i_se_u32m2(p27_26
, p24_20
, simm5
, vl
);
1301 // CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u32m4(
1302 // CHECK-RV32-NEXT: entry:
1303 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.i.se.nxv8i32.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1304 // CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1306 // CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u32m4(
1307 // CHECK-RV64-NEXT: entry:
1308 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.i.se.nxv8i32.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1309 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1311 vuint32m4_t
test_sf_vc_v_i_se_u32m4(size_t vl
) {
1312 return __riscv_sf_vc_v_i_se_u32m4(p27_26
, p24_20
, simm5
, vl
);
1315 // CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u32m8(
1316 // CHECK-RV32-NEXT: entry:
1317 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.i.se.nxv16i32.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1318 // CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1320 // CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u32m8(
1321 // CHECK-RV64-NEXT: entry:
1322 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.i.se.nxv16i32.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1323 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1325 vuint32m8_t
test_sf_vc_v_i_se_u32m8(size_t vl
) {
1326 return __riscv_sf_vc_v_i_se_u32m8(p27_26
, p24_20
, simm5
, vl
);
1329 // CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u64m1(
1330 // CHECK-RV32-NEXT: entry:
1331 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.i.se.nxv1i64.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1332 // CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1334 // CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u64m1(
1335 // CHECK-RV64-NEXT: entry:
1336 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.i.se.nxv1i64.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1337 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1339 vuint64m1_t
test_sf_vc_v_i_se_u64m1(size_t vl
) {
1340 return __riscv_sf_vc_v_i_se_u64m1(p27_26
, p24_20
, simm5
, vl
);
1343 // CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u64m2(
1344 // CHECK-RV32-NEXT: entry:
1345 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.i.se.nxv2i64.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1346 // CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1348 // CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u64m2(
1349 // CHECK-RV64-NEXT: entry:
1350 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.i.se.nxv2i64.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1351 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1353 vuint64m2_t
test_sf_vc_v_i_se_u64m2(size_t vl
) {
1354 return __riscv_sf_vc_v_i_se_u64m2(p27_26
, p24_20
, simm5
, vl
);
1357 // CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u64m4(
1358 // CHECK-RV32-NEXT: entry:
1359 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.i.se.nxv4i64.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1360 // CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1362 // CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u64m4(
1363 // CHECK-RV64-NEXT: entry:
1364 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.i.se.nxv4i64.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1365 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1367 vuint64m4_t
test_sf_vc_v_i_se_u64m4(size_t vl
) {
1368 return __riscv_sf_vc_v_i_se_u64m4(p27_26
, p24_20
, simm5
, vl
);
1371 // CHECK-RV32-LABEL: @test_sf_vc_v_i_se_u64m8(
1372 // CHECK-RV32-NEXT: entry:
1373 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.se.nxv8i64.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1374 // CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1376 // CHECK-RV64-LABEL: @test_sf_vc_v_i_se_u64m8(
1377 // CHECK-RV64-NEXT: entry:
1378 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.se.nxv8i64.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1379 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1381 vuint64m8_t
test_sf_vc_v_i_se_u64m8(size_t vl
) {
1382 return __riscv_sf_vc_v_i_se_u64m8(p27_26
, p24_20
, simm5
, vl
);
1385 // CHECK-RV32-LABEL: @test_sf_vc_v_i_u8mf8(
1386 // CHECK-RV32-NEXT: entry:
1387 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.i.nxv1i8.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1388 // CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
1390 // CHECK-RV64-LABEL: @test_sf_vc_v_i_u8mf8(
1391 // CHECK-RV64-NEXT: entry:
1392 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.i.nxv1i8.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1393 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
1395 vuint8mf8_t
test_sf_vc_v_i_u8mf8(size_t vl
) {
1396 return __riscv_sf_vc_v_i_u8mf8(p27_26
, p24_20
, simm5
, vl
);
1399 // CHECK-RV32-LABEL: @test_sf_vc_v_i_u8mf4(
1400 // CHECK-RV32-NEXT: entry:
1401 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.i.nxv2i8.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1402 // CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
1404 // CHECK-RV64-LABEL: @test_sf_vc_v_i_u8mf4(
1405 // CHECK-RV64-NEXT: entry:
1406 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.i.nxv2i8.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1407 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
1409 vuint8mf4_t
test_sf_vc_v_i_u8mf4(size_t vl
) {
1410 return __riscv_sf_vc_v_i_u8mf4(p27_26
, p24_20
, simm5
, vl
);
1413 // CHECK-RV32-LABEL: @test_sf_vc_v_i_u8mf2(
1414 // CHECK-RV32-NEXT: entry:
1415 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.i.nxv4i8.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1416 // CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
1418 // CHECK-RV64-LABEL: @test_sf_vc_v_i_u8mf2(
1419 // CHECK-RV64-NEXT: entry:
1420 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.i.nxv4i8.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1421 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
1423 vuint8mf2_t
test_sf_vc_v_i_u8mf2(size_t vl
) {
1424 return __riscv_sf_vc_v_i_u8mf2(p27_26
, p24_20
, simm5
, vl
);
1427 // CHECK-RV32-LABEL: @test_sf_vc_v_i_u8m1(
1428 // CHECK-RV32-NEXT: entry:
1429 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.i.nxv8i8.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1430 // CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
1432 // CHECK-RV64-LABEL: @test_sf_vc_v_i_u8m1(
1433 // CHECK-RV64-NEXT: entry:
1434 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.i.nxv8i8.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1435 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
1437 vuint8m1_t
test_sf_vc_v_i_u8m1(size_t vl
) {
1438 return __riscv_sf_vc_v_i_u8m1(p27_26
, p24_20
, simm5
, vl
);
1441 // CHECK-RV32-LABEL: @test_sf_vc_v_i_u8m2(
1442 // CHECK-RV32-NEXT: entry:
1443 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.i.nxv16i8.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1444 // CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
1446 // CHECK-RV64-LABEL: @test_sf_vc_v_i_u8m2(
1447 // CHECK-RV64-NEXT: entry:
1448 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.i.nxv16i8.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1449 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
1451 vuint8m2_t
test_sf_vc_v_i_u8m2(size_t vl
) {
1452 return __riscv_sf_vc_v_i_u8m2(p27_26
, p24_20
, simm5
, vl
);
1455 // CHECK-RV32-LABEL: @test_sf_vc_v_i_u8m4(
1456 // CHECK-RV32-NEXT: entry:
1457 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.i.nxv32i8.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1458 // CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
1460 // CHECK-RV64-LABEL: @test_sf_vc_v_i_u8m4(
1461 // CHECK-RV64-NEXT: entry:
1462 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.i.nxv32i8.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1463 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
1465 vuint8m4_t
test_sf_vc_v_i_u8m4(size_t vl
) {
1466 return __riscv_sf_vc_v_i_u8m4(p27_26
, p24_20
, simm5
, vl
);
1469 // CHECK-RV32-LABEL: @test_sf_vc_v_i_u8m8(
1470 // CHECK-RV32-NEXT: entry:
1471 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.i.nxv64i8.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1472 // CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
1474 // CHECK-RV64-LABEL: @test_sf_vc_v_i_u8m8(
1475 // CHECK-RV64-NEXT: entry:
1476 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.i.nxv64i8.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1477 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
1479 vuint8m8_t
test_sf_vc_v_i_u8m8(size_t vl
) {
1480 return __riscv_sf_vc_v_i_u8m8(p27_26
, p24_20
, simm5
, vl
);
1483 // CHECK-RV32-LABEL: @test_sf_vc_v_i_u16mf4(
1484 // CHECK-RV32-NEXT: entry:
1485 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.i.nxv1i16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1486 // CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1488 // CHECK-RV64-LABEL: @test_sf_vc_v_i_u16mf4(
1489 // CHECK-RV64-NEXT: entry:
1490 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.i.nxv1i16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1491 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1493 vuint16mf4_t
test_sf_vc_v_i_u16mf4(size_t vl
) {
1494 return __riscv_sf_vc_v_i_u16mf4(p27_26
, p24_20
, simm5
, vl
);
1497 // CHECK-RV32-LABEL: @test_sf_vc_v_i_u16mf2(
1498 // CHECK-RV32-NEXT: entry:
1499 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.i.nxv2i16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1500 // CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1502 // CHECK-RV64-LABEL: @test_sf_vc_v_i_u16mf2(
1503 // CHECK-RV64-NEXT: entry:
1504 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.i.nxv2i16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1505 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1507 vuint16mf2_t
test_sf_vc_v_i_u16mf2(size_t vl
) {
1508 return __riscv_sf_vc_v_i_u16mf2(p27_26
, p24_20
, simm5
, vl
);
1511 // CHECK-RV32-LABEL: @test_sf_vc_v_i_u16m1(
1512 // CHECK-RV32-NEXT: entry:
1513 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.i.nxv4i16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1514 // CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1516 // CHECK-RV64-LABEL: @test_sf_vc_v_i_u16m1(
1517 // CHECK-RV64-NEXT: entry:
1518 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.i.nxv4i16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1519 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1521 vuint16m1_t
test_sf_vc_v_i_u16m1(size_t vl
) {
1522 return __riscv_sf_vc_v_i_u16m1(p27_26
, p24_20
, simm5
, vl
);
1525 // CHECK-RV32-LABEL: @test_sf_vc_v_i_u16m2(
1526 // CHECK-RV32-NEXT: entry:
1527 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.i.nxv8i16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1528 // CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1530 // CHECK-RV64-LABEL: @test_sf_vc_v_i_u16m2(
1531 // CHECK-RV64-NEXT: entry:
1532 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.i.nxv8i16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1533 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1535 vuint16m2_t
test_sf_vc_v_i_u16m2(size_t vl
) {
1536 return __riscv_sf_vc_v_i_u16m2(p27_26
, p24_20
, simm5
, vl
);
1539 // CHECK-RV32-LABEL: @test_sf_vc_v_i_u16m4(
1540 // CHECK-RV32-NEXT: entry:
1541 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.i.nxv16i16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1542 // CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1544 // CHECK-RV64-LABEL: @test_sf_vc_v_i_u16m4(
1545 // CHECK-RV64-NEXT: entry:
1546 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.i.nxv16i16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1547 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1549 vuint16m4_t
test_sf_vc_v_i_u16m4(size_t vl
) {
1550 return __riscv_sf_vc_v_i_u16m4(p27_26
, p24_20
, simm5
, vl
);
1553 // CHECK-RV32-LABEL: @test_sf_vc_v_i_u16m8(
1554 // CHECK-RV32-NEXT: entry:
1555 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.i.nxv32i16.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1556 // CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1558 // CHECK-RV64-LABEL: @test_sf_vc_v_i_u16m8(
1559 // CHECK-RV64-NEXT: entry:
1560 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.i.nxv32i16.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1561 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1563 vuint16m8_t
test_sf_vc_v_i_u16m8(size_t vl
) {
1564 return __riscv_sf_vc_v_i_u16m8(p27_26
, p24_20
, simm5
, vl
);
1567 // CHECK-RV32-LABEL: @test_sf_vc_v_i_u32mf2(
1568 // CHECK-RV32-NEXT: entry:
1569 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.i.nxv1i32.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1570 // CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1572 // CHECK-RV64-LABEL: @test_sf_vc_v_i_u32mf2(
1573 // CHECK-RV64-NEXT: entry:
1574 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.i.nxv1i32.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1575 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1577 vuint32mf2_t
test_sf_vc_v_i_u32mf2(size_t vl
) {
1578 return __riscv_sf_vc_v_i_u32mf2(p27_26
, p24_20
, simm5
, vl
);
1581 // CHECK-RV32-LABEL: @test_sf_vc_v_i_u32m1(
1582 // CHECK-RV32-NEXT: entry:
1583 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.i.nxv2i32.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1584 // CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1586 // CHECK-RV64-LABEL: @test_sf_vc_v_i_u32m1(
1587 // CHECK-RV64-NEXT: entry:
1588 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.i.nxv2i32.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1589 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1591 vuint32m1_t
test_sf_vc_v_i_u32m1(size_t vl
) {
1592 return __riscv_sf_vc_v_i_u32m1(p27_26
, p24_20
, simm5
, vl
);
1595 // CHECK-RV32-LABEL: @test_sf_vc_v_i_u32m2(
1596 // CHECK-RV32-NEXT: entry:
1597 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.i.nxv4i32.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1598 // CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1600 // CHECK-RV64-LABEL: @test_sf_vc_v_i_u32m2(
1601 // CHECK-RV64-NEXT: entry:
1602 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.i.nxv4i32.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1603 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1605 vuint32m2_t
test_sf_vc_v_i_u32m2(size_t vl
) {
1606 return __riscv_sf_vc_v_i_u32m2(p27_26
, p24_20
, simm5
, vl
);
1609 // CHECK-RV32-LABEL: @test_sf_vc_v_i_u32m4(
1610 // CHECK-RV32-NEXT: entry:
1611 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.i.nxv8i32.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1612 // CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1614 // CHECK-RV64-LABEL: @test_sf_vc_v_i_u32m4(
1615 // CHECK-RV64-NEXT: entry:
1616 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.i.nxv8i32.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1617 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1619 vuint32m4_t
test_sf_vc_v_i_u32m4(size_t vl
) {
1620 return __riscv_sf_vc_v_i_u32m4(p27_26
, p24_20
, simm5
, vl
);
1623 // CHECK-RV32-LABEL: @test_sf_vc_v_i_u32m8(
1624 // CHECK-RV32-NEXT: entry:
1625 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.i.nxv16i32.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1626 // CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1628 // CHECK-RV64-LABEL: @test_sf_vc_v_i_u32m8(
1629 // CHECK-RV64-NEXT: entry:
1630 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.i.nxv16i32.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1631 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1633 vuint32m8_t
test_sf_vc_v_i_u32m8(size_t vl
) {
1634 return __riscv_sf_vc_v_i_u32m8(p27_26
, p24_20
, simm5
, vl
);
1637 // CHECK-RV32-LABEL: @test_sf_vc_v_i_u64m1(
1638 // CHECK-RV32-NEXT: entry:
1639 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.i.nxv1i64.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1640 // CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1642 // CHECK-RV64-LABEL: @test_sf_vc_v_i_u64m1(
1643 // CHECK-RV64-NEXT: entry:
1644 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.i.nxv1i64.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1645 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1647 vuint64m1_t
test_sf_vc_v_i_u64m1(size_t vl
) {
1648 return __riscv_sf_vc_v_i_u64m1(p27_26
, p24_20
, simm5
, vl
);
1651 // CHECK-RV32-LABEL: @test_sf_vc_v_i_u64m2(
1652 // CHECK-RV32-NEXT: entry:
1653 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.i.nxv2i64.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1654 // CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1656 // CHECK-RV64-LABEL: @test_sf_vc_v_i_u64m2(
1657 // CHECK-RV64-NEXT: entry:
1658 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.i.nxv2i64.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1659 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1661 vuint64m2_t
test_sf_vc_v_i_u64m2(size_t vl
) {
1662 return __riscv_sf_vc_v_i_u64m2(p27_26
, p24_20
, simm5
, vl
);
1665 // CHECK-RV32-LABEL: @test_sf_vc_v_i_u64m4(
1666 // CHECK-RV32-NEXT: entry:
1667 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.i.nxv4i64.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1668 // CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1670 // CHECK-RV64-LABEL: @test_sf_vc_v_i_u64m4(
1671 // CHECK-RV64-NEXT: entry:
1672 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.i.nxv4i64.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1673 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1675 vuint64m4_t
test_sf_vc_v_i_u64m4(size_t vl
) {
1676 return __riscv_sf_vc_v_i_u64m4(p27_26
, p24_20
, simm5
, vl
);
1679 // CHECK-RV32-LABEL: @test_sf_vc_v_i_u64m8(
1680 // CHECK-RV32-NEXT: entry:
1681 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.nxv8i64.i32.i32.i32(i32 3, i32 31, i32 10, i32 [[VL:%.*]])
1682 // CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1684 // CHECK-RV64-LABEL: @test_sf_vc_v_i_u64m8(
1685 // CHECK-RV64-NEXT: entry:
1686 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.i.nxv8i64.i64.i64.i64(i64 3, i64 31, i64 10, i64 [[VL:%.*]])
1687 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1689 vuint64m8_t
test_sf_vc_v_i_u64m8(size_t vl
) {
1690 return __riscv_sf_vc_v_i_u64m8(p27_26
, p24_20
, simm5
, vl
);