// clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/xsfvcp-xvv.c
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv32 -target-feature +v -target-feature +zfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <sifive_vector.h>

#define p27_26 (0b11)
#define p26 (0b1)
#define simm5 (10)
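
// Note: p27_26, p26 and simm5 supply the compile-time opcode/immediate
// operands of the SiFive VCIX (xsfvcp) intrinsics; as the names suggest,
// they correspond to instruction bits 27:26, bit 26, and a 5-bit signed
// immediate. Only p27_26 (0b11, which is why every expected call below
// passes "i32 3" / "i64 3" as its first argument) is used in this file.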
// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8mf8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i8.nxv1i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv1i8.nxv1i8.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_vvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
  __riscv_sf_vc_vvv_se_u8mf8(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8mf4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i8.nxv2i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2i8.nxv2i8.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_vvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
  __riscv_sf_vc_vvv_se_u8mf4(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8mf2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i8.nxv4i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4i8.nxv4i8.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_vvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
  __riscv_sf_vc_vvv_se_u8mf2(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i8.nxv8i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8i8.nxv8i8.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_vvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
  __riscv_sf_vc_vvv_se_u8m1(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv16i8.nxv16i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv16i8.nxv16i8.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_vvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
  __riscv_sf_vc_vvv_se_u8m2(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv32i8.nxv32i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv32i8.nxv32i8.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_vvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
  __riscv_sf_vc_vvv_se_u8m4(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u8m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv64i8.nxv64i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv64i8.nxv64i8.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_vvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
  __riscv_sf_vc_vvv_se_u8m8(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16mf4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i16.nxv1i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv1i16.nxv1i16.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_vvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
  __riscv_sf_vc_vvv_se_u16mf4(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16mf2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i16.nxv2i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2i16.nxv2i16.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_vvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
  __riscv_sf_vc_vvv_se_u16mf2(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i16.nxv4i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4i16.nxv4i16.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_vvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
  __riscv_sf_vc_vvv_se_u16m1(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i16.nxv8i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8i16.nxv8i16.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_vvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
  __riscv_sf_vc_vvv_se_u16m2(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv16i16.nxv16i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv16i16.nxv16i16.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_vvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
  __riscv_sf_vc_vvv_se_u16m4(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u16m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv32i16.nxv32i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv32i16.nxv32i16.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_vvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
  __riscv_sf_vc_vvv_se_u16m8(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u32mf2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i32.nxv1i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv1i32.nxv1i32.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_vvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
  __riscv_sf_vc_vvv_se_u32mf2(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u32m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i32.nxv2i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2i32.nxv2i32.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_vvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
  __riscv_sf_vc_vvv_se_u32m1(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u32m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i32.nxv4i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4i32.nxv4i32.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_vvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
  __riscv_sf_vc_vvv_se_u32m2(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u32m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i32.nxv8i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8i32.nxv8i32.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_vvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
  __riscv_sf_vc_vvv_se_u32m4(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u32m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv16i32.nxv16i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv16i32.nxv16i32.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_vvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
  __riscv_sf_vc_vvv_se_u32m8(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u64m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv1i64.nxv1i64.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv1i64.nxv1i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_vvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
  __riscv_sf_vc_vvv_se_u64m1(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u64m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv2i64.nxv2i64.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv2i64.nxv2i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_vvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
  __riscv_sf_vc_vvv_se_u64m2(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u64m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv4i64.nxv4i64.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv4i64.nxv4i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_vvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
  __riscv_sf_vc_vvv_se_u64m4(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_vvv_se_u64m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i32.nxv8i64.nxv8i64.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_vvv_se_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.vvv.se.i64.nxv8i64.nxv8i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_vvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
  __riscv_sf_vc_vvv_se_u64m8(p27_26, vd, vs2, vs1, vl);
}
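
// The tests above cover the void __riscv_sf_vc_vvv_se_* form, which is used
// only for its side effect. The block below walks the same SEW/LMUL matrix
// for __riscv_sf_vc_v_vvv_se_*, the variant that also returns the result
// vector, so each check now captures the call result in [[TMP0]] and
// returns it.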

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8mf8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.i32.nxv1i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.i64.nxv1i8.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_sf_vc_v_vvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_se_u8mf8(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8mf4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.i32.nxv2i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.i64.nxv2i8.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_sf_vc_v_vvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_se_u8mf4(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8mf2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.i32.nxv4i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.i64.nxv4i8.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_sf_vc_v_vvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_se_u8mf2(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.i32.nxv8i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.i64.nxv8i8.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_sf_vc_v_vvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_se_u8m1(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.i32.nxv16i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.i64.nxv16i8.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_sf_vc_v_vvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_se_u8m2(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.i32.nxv32i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.i64.nxv32i8.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_sf_vc_v_vvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_se_u8m4(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u8m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.i32.nxv64i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.i64.nxv64i8.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_sf_vc_v_vvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_se_u8m8(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16mf4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.i32.nxv1i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.i64.nxv1i16.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_sf_vc_v_vvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_se_u16mf4(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16mf2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.i32.nxv2i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.i64.nxv2i16.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_sf_vc_v_vvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_se_u16mf2(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.i32.nxv4i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.i64.nxv4i16.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_sf_vc_v_vvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_se_u16m1(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.i32.nxv8i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.i64.nxv8i16.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_sf_vc_v_vvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_se_u16m2(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.i32.nxv16i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.i64.nxv16i16.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_sf_vc_v_vvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_se_u16m4(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u16m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.i32.nxv32i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.i64.nxv32i16.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_sf_vc_v_vvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_se_u16m8(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u32mf2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.i32.nxv1i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.i64.nxv1i32.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_sf_vc_v_vvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_se_u32mf2(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u32m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.i32.nxv2i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.i64.nxv2i32.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_sf_vc_v_vvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_se_u32m1(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u32m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.i32.nxv4i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.i64.nxv4i32.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_sf_vc_v_vvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_se_u32m2(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u32m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.i32.nxv8i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.i64.nxv8i32.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_sf_vc_v_vvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_se_u32m4(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u32m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.i32.nxv16i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.i64.nxv16i32.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_sf_vc_v_vvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_se_u32m8(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u64m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.i32.nxv1i64.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.i64.nxv1i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_sf_vc_v_vvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_se_u64m1(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u64m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.i32.nxv2i64.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.i64.nxv2i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_sf_vc_v_vvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_se_u64m2(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u64m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.i32.nxv4i64.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.i64.nxv4i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_sf_vc_v_vvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_se_u64m4(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_se_u64m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.i32.nxv8i64.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_se_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.i64.nxv8i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_sf_vc_v_vvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_se_u64m8(p27_26, vd, vs2, vs1, vl);
}
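
// Next come the __riscv_sf_vc_v_vvv_u* variants: identical operands and
// return type, but without the side-effect ("se") guarantee, so the expected
// intrinsic drops the ".se" infix (llvm.riscv.sf.vc.v.vvv.*).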
628 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8mf8(
629 // CHECK-RV32-NEXT: entry:
630 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.nxv1i8.i32.nxv1i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
631 // CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
633 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8mf8(
634 // CHECK-RV64-NEXT: entry:
635 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.nxv1i8.i64.nxv1i8.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
636 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
638 vuint8mf8_t test_sf_vc_v_vvv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
639 return __riscv_sf_vc_v_vvv_u8mf8(p27_26, vd, vs2, vs1, vl);
642 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8mf4(
643 // CHECK-RV32-NEXT: entry:
644 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.nxv2i8.i32.nxv2i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
645 // CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
647 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8mf4(
648 // CHECK-RV64-NEXT: entry:
649 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.nxv2i8.i64.nxv2i8.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
650 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
652 vuint8mf4_t test_sf_vc_v_vvv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
653 return __riscv_sf_vc_v_vvv_u8mf4(p27_26, vd, vs2, vs1, vl);
656 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8mf2(
657 // CHECK-RV32-NEXT: entry:
658 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.nxv4i8.i32.nxv4i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
659 // CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
661 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8mf2(
662 // CHECK-RV64-NEXT: entry:
663 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.nxv4i8.i64.nxv4i8.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
664 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
666 vuint8mf2_t test_sf_vc_v_vvv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
667 return __riscv_sf_vc_v_vvv_u8mf2(p27_26, vd, vs2, vs1, vl);
670 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8m1(
671 // CHECK-RV32-NEXT: entry:
672 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.nxv8i8.i32.nxv8i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
673 // CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
675 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8m1(
676 // CHECK-RV64-NEXT: entry:
677 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.nxv8i8.i64.nxv8i8.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
678 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
680 vuint8m1_t test_sf_vc_v_vvv_u8m1(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
681 return __riscv_sf_vc_v_vvv_u8m1(p27_26, vd, vs2, vs1, vl);
684 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8m2(
685 // CHECK-RV32-NEXT: entry:
686 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.nxv16i8.i32.nxv16i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
687 // CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
689 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8m2(
690 // CHECK-RV64-NEXT: entry:
691 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.nxv16i8.i64.nxv16i8.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
692 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
694 vuint8m2_t test_sf_vc_v_vvv_u8m2(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
695 return __riscv_sf_vc_v_vvv_u8m2(p27_26, vd, vs2, vs1, vl);
698 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8m4(
699 // CHECK-RV32-NEXT: entry:
700 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.nxv32i8.i32.nxv32i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
701 // CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
703 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8m4(
704 // CHECK-RV64-NEXT: entry:
705 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.nxv32i8.i64.nxv32i8.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
706 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
708 vuint8m4_t test_sf_vc_v_vvv_u8m4(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
709 return __riscv_sf_vc_v_vvv_u8m4(p27_26, vd, vs2, vs1, vl);
712 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u8m8(
713 // CHECK-RV32-NEXT: entry:
714 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.nxv64i8.i32.nxv64i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
715 // CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
717 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u8m8(
718 // CHECK-RV64-NEXT: entry:
719 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.nxv64i8.i64.nxv64i8.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
720 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
722 vuint8m8_t test_sf_vc_v_vvv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
723 return __riscv_sf_vc_v_vvv_u8m8(p27_26, vd, vs2, vs1, vl);
726 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16mf4(
727 // CHECK-RV32-NEXT: entry:
728 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.nxv1i16.i32.nxv1i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
729 // CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
731 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16mf4(
732 // CHECK-RV64-NEXT: entry:
733 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.nxv1i16.i64.nxv1i16.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
734 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
736 vuint16mf4_t test_sf_vc_v_vvv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
737 return __riscv_sf_vc_v_vvv_u16mf4(p27_26, vd, vs2, vs1, vl);
740 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16mf2(
741 // CHECK-RV32-NEXT: entry:
742 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.nxv2i16.i32.nxv2i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
743 // CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
745 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16mf2(
746 // CHECK-RV64-NEXT: entry:
747 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.nxv2i16.i64.nxv2i16.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
748 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
750 vuint16mf2_t test_sf_vc_v_vvv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
751 return __riscv_sf_vc_v_vvv_u16mf2(p27_26, vd, vs2, vs1, vl);
754 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16m1(
755 // CHECK-RV32-NEXT: entry:
756 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.nxv4i16.i32.nxv4i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
757 // CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
759 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16m1(
760 // CHECK-RV64-NEXT: entry:
761 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.nxv4i16.i64.nxv4i16.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
762 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
764 vuint16m1_t test_sf_vc_v_vvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
765 return __riscv_sf_vc_v_vvv_u16m1(p27_26, vd, vs2, vs1, vl);
768 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16m2(
769 // CHECK-RV32-NEXT: entry:
770 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.nxv8i16.i32.nxv8i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
771 // CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
773 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16m2(
774 // CHECK-RV64-NEXT: entry:
775 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.nxv8i16.i64.nxv8i16.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
776 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
778 vuint16m2_t test_sf_vc_v_vvv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
779 return __riscv_sf_vc_v_vvv_u16m2(p27_26, vd, vs2, vs1, vl);
782 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16m4(
783 // CHECK-RV32-NEXT: entry:
784 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.nxv16i16.i32.nxv16i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
785 // CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
787 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16m4(
788 // CHECK-RV64-NEXT: entry:
789 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.nxv16i16.i64.nxv16i16.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
790 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
792 vuint16m4_t test_sf_vc_v_vvv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
793 return __riscv_sf_vc_v_vvv_u16m4(p27_26, vd, vs2, vs1, vl);
796 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u16m8(
797 // CHECK-RV32-NEXT: entry:
798 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.nxv32i16.i32.nxv32i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
799 // CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
801 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u16m8(
802 // CHECK-RV64-NEXT: entry:
803 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.nxv32i16.i64.nxv32i16.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
804 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
806 vuint16m8_t test_sf_vc_v_vvv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
807 return __riscv_sf_vc_v_vvv_u16m8(p27_26, vd, vs2, vs1, vl);
810 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u32mf2(
811 // CHECK-RV32-NEXT: entry:
812 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.nxv1i32.i32.nxv1i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
813 // CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
815 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u32mf2(
816 // CHECK-RV64-NEXT: entry:
817 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.nxv1i32.i64.nxv1i32.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
818 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
820 vuint32mf2_t test_sf_vc_v_vvv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
821 return __riscv_sf_vc_v_vvv_u32mf2(p27_26, vd, vs2, vs1, vl);
824 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u32m1(
825 // CHECK-RV32-NEXT: entry:
826 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.nxv2i32.i32.nxv2i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
827 // CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
829 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u32m1(
830 // CHECK-RV64-NEXT: entry:
831 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.nxv2i32.i64.nxv2i32.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
832 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
834 vuint32m1_t test_sf_vc_v_vvv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
835 return __riscv_sf_vc_v_vvv_u32m1(p27_26, vd, vs2, vs1, vl);
838 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u32m2(
839 // CHECK-RV32-NEXT: entry:
840 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.nxv4i32.i32.nxv4i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
841 // CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
843 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u32m2(
844 // CHECK-RV64-NEXT: entry:
845 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.nxv4i32.i64.nxv4i32.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
846 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
848 vuint32m2_t test_sf_vc_v_vvv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
849 return __riscv_sf_vc_v_vvv_u32m2(p27_26, vd, vs2, vs1, vl);
852 // CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u32m4(
853 // CHECK-RV32-NEXT: entry:
854 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.nxv8i32.i32.nxv8i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
855 // CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
857 // CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u32m4(
858 // CHECK-RV64-NEXT: entry:
859 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.nxv8i32.i64.nxv8i32.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
860 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
862 vuint32m4_t test_sf_vc_v_vvv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_u32m4(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u32m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.nxv16i32.i32.nxv16i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.nxv16i32.i64.nxv16i32.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_sf_vc_v_vvv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_u32m8(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u64m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.nxv1i64.i32.nxv1i64.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.nxv1i64.i64.nxv1i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_sf_vc_v_vvv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_u64m1(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u64m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.nxv2i64.i32.nxv2i64.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.nxv2i64.i64.nxv2i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_sf_vc_v_vvv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_u64m2(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u64m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.nxv4i64.i32.nxv4i64.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.nxv4i64.i64.nxv4i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_sf_vc_v_vvv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_u64m4(p27_26, vd, vs2, vs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_vvv_u64m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.i32.nxv8i64.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_vvv_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.i64.nxv8i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_sf_vc_v_vvv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
  return __riscv_sf_vc_v_vvv_u64m8(p27_26, vd, vs2, vs1, vl);
}

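// __riscv_sf_vc_xvv_se_* : the vs1 operand is a scalar rs1 in an x-register;
// these "se" variants return no vector result.
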
// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8mf8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i8.i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv1i8.i8.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_xvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
  __riscv_sf_vc_xvv_se_u8mf8(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8mf4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i8.i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv2i8.i8.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_xvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
  __riscv_sf_vc_xvv_se_u8mf4(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8mf2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i8.i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4i8.i8.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_xvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
  __riscv_sf_vc_xvv_se_u8mf2(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i8.i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv8i8.i8.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_xvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
  __riscv_sf_vc_xvv_se_u8m1(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i8.i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv16i8.i8.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_xvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
  __riscv_sf_vc_xvv_se_u8m2(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv32i8.i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv32i8.i8.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_xvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
  __riscv_sf_vc_xvv_se_u8m4(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u8m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv64i8.i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv64i8.i8.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_xvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
  __riscv_sf_vc_xvv_se_u8m8(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16mf4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i16.i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv1i16.i16.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_xvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
  __riscv_sf_vc_xvv_se_u16mf4(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16mf2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i16.i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv2i16.i16.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_xvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
  __riscv_sf_vc_xvv_se_u16mf2(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i16.i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4i16.i16.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_xvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
  __riscv_sf_vc_xvv_se_u16m1(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i16.i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv8i16.i16.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_xvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
  __riscv_sf_vc_xvv_se_u16m2(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i16.i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv16i16.i16.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_xvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
  __riscv_sf_vc_xvv_se_u16m4(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u16m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv32i16.i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv32i16.i16.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_xvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
  __riscv_sf_vc_xvv_se_u16m8(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u32mf2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv1i32.i32.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_xvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
  __riscv_sf_vc_xvv_se_u32mf2(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u32m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv2i32.i32.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_xvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
  __riscv_sf_vc_xvv_se_u32m1(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u32m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv4i32.i32.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_xvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
  __riscv_sf_vc_xvv_se_u32m2(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u32m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv8i32.i32.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_xvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
  __riscv_sf_vc_xvv_se_u32m4(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_xvv_se_u32m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret void
//
// CHECK-RV64-LABEL: @test_sf_vc_xvv_se_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    call void @llvm.riscv.sf.vc.xvv.se.i64.nxv16i32.i32.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret void
//
void test_sf_vc_xvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
  __riscv_sf_vc_xvv_se_u32m8(p27_26, vd, vs2, rs1, vl);
}

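// __riscv_sf_vc_v_xvv_se_* : same scalar-rs1 operand shape as above, but each
// intrinsic also returns the destination vector.
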
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8mf8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.i32.i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.i64.i8.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_sf_vc_v_xvv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_se_u8mf8(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8mf4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.i32.i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.i64.i8.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_sf_vc_v_xvv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_se_u8mf4(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8mf2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.i32.i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.i64.i8.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_sf_vc_v_xvv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_se_u8mf2(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.i32.i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.i64.i8.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_sf_vc_v_xvv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_se_u8m1(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.i32.i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.i64.i8.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_sf_vc_v_xvv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_se_u8m2(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.i32.i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.i64.i8.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_sf_vc_v_xvv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_se_u8m4(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u8m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.i32.i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.i64.i8.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_sf_vc_v_xvv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_se_u8m8(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16mf4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.i32.i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.i64.i16.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_sf_vc_v_xvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_se_u16mf4(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16mf2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.i32.i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.i64.i16.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_sf_vc_v_xvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_se_u16mf2(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.i32.i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.i64.i16.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_sf_vc_v_xvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_se_u16m1(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.i32.i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.i64.i16.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_sf_vc_v_xvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_se_u16m2(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.i32.i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.i64.i16.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_sf_vc_v_xvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_se_u16m4(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u16m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.i32.i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.i64.i16.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_sf_vc_v_xvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_se_u16m8(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u32mf2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.i64.i32.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_sf_vc_v_xvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_se_u32mf2(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u32m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.i64.i32.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_sf_vc_v_xvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_se_u32m1(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u32m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.i64.i32.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_sf_vc_v_xvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_se_u32m2(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u32m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.i64.i32.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_sf_vc_v_xvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_se_u32m4(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_se_u32m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_se_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.i64.i32.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_sf_vc_v_xvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_se_u32m8(p27_26, vd, vs2, rs1, vl);
}

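// __riscv_sf_vc_v_xvv_* : non-"se" counterparts of the block above, lowered to
// the @llvm.riscv.sf.vc.v.xvv.* intrinsics without the .se suffix.
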
// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8mf8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.nxv1i8.i32.i8.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.nxv1i8.i64.i8.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_sf_vc_v_xvv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_u8mf8(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8mf4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.nxv2i8.i32.i8.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.nxv2i8.i64.i8.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_sf_vc_v_xvv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_u8mf4(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8mf2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.nxv4i8.i32.i8.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.nxv4i8.i64.i8.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_sf_vc_v_xvv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_u8mf2(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.nxv8i8.i32.i8.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.nxv8i8.i64.i8.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_sf_vc_v_xvv_u8m1(vuint8m1_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_u8m1(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.nxv16i8.i32.i8.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.nxv16i8.i64.i8.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_sf_vc_v_xvv_u8m2(vuint8m2_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_u8m2(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.nxv32i8.i32.i8.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.nxv32i8.i64.i8.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_sf_vc_v_xvv_u8m4(vuint8m4_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_u8m4(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u8m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.nxv64i8.i32.i8.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.nxv64i8.i64.i8.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_sf_vc_v_xvv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, uint8_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_u8m8(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16mf4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.nxv1i16.i32.i16.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.nxv1i16.i64.i16.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_sf_vc_v_xvv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_u16mf4(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16mf2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.nxv2i16.i32.i16.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.nxv2i16.i64.i16.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_sf_vc_v_xvv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_u16mf2(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.nxv4i16.i32.i16.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.nxv4i16.i64.i16.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_sf_vc_v_xvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_u16m1(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.nxv8i16.i32.i16.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.nxv8i16.i64.i16.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_sf_vc_v_xvv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_u16m2(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.nxv16i16.i32.i16.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.nxv16i16.i64.i16.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_sf_vc_v_xvv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_u16m4(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u16m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.nxv32i16.i32.i16.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.nxv32i16.i64.i16.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_sf_vc_v_xvv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, uint16_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_u16m8(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u32mf2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.nxv1i32.i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.nxv1i32.i64.i32.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_sf_vc_v_xvv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_u32mf2(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u32m1(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.nxv2i32.i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.nxv2i32.i64.i32.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_sf_vc_v_xvv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_u32m1(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u32m2(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.nxv4i32.i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.nxv4i32.i64.i32.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_sf_vc_v_xvv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_u32m2(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u32m4(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.nxv8i32.i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.nxv8i32.i64.i32.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_sf_vc_v_xvv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_u32m4(p27_26, vd, vs2, rs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_xvv_u32m8(
// CHECK-RV32-NEXT:  entry:
// CHECK-RV32-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.nxv16i32.i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_xvv_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.nxv16i32.i64.i32.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_sf_vc_v_xvv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, uint32_t rs1, size_t vl) {
  return __riscv_sf_vc_v_xvv_u32m8(p27_26, vd, vs2, rs1, vl);
}

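// __riscv_sf_vc_ivv_se_* : rs1 is replaced by a 5-bit immediate (the simm5
// macro defined at the top of this file, which appears as 10 in the checks).
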
1692 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8mf8(
1693 // CHECK-RV32-NEXT: entry:
1694 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i8.i32.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1695 // CHECK-RV32-NEXT: ret void
1697 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8mf8(
1698 // CHECK-RV64-NEXT: entry:
1699 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv1i8.i64.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1700 // CHECK-RV64-NEXT: ret void
1702 void test_sf_vc_ivv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
1703 __riscv_sf_vc_ivv_se_u8mf8(p27_26, vd, vs2, simm5, vl);
1706 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8mf4(
1707 // CHECK-RV32-NEXT: entry:
1708 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i8.i32.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1709 // CHECK-RV32-NEXT: ret void
1711 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8mf4(
1712 // CHECK-RV64-NEXT: entry:
1713 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv2i8.i64.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1714 // CHECK-RV64-NEXT: ret void
1716 void test_sf_vc_ivv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) {
1717 __riscv_sf_vc_ivv_se_u8mf4(p27_26, vd, vs2, simm5, vl);
1720 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8mf2(
1721 // CHECK-RV32-NEXT: entry:
1722 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i8.i32.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1723 // CHECK-RV32-NEXT: ret void
1725 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8mf2(
1726 // CHECK-RV64-NEXT: entry:
1727 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv4i8.i64.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1728 // CHECK-RV64-NEXT: ret void
1730 void test_sf_vc_ivv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) {
1731 __riscv_sf_vc_ivv_se_u8mf2(p27_26, vd, vs2, simm5, vl);
1734 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8m1(
1735 // CHECK-RV32-NEXT: entry:
1736 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i8.i32.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1737 // CHECK-RV32-NEXT: ret void
1739 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8m1(
1740 // CHECK-RV64-NEXT: entry:
1741 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv8i8.i64.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1742 // CHECK-RV64-NEXT: ret void
1744 void test_sf_vc_ivv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
1745 __riscv_sf_vc_ivv_se_u8m1(p27_26, vd, vs2, simm5, vl);
1748 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8m2(
1749 // CHECK-RV32-NEXT: entry:
1750 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv16i8.i32.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1751 // CHECK-RV32-NEXT: ret void
1753 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8m2(
1754 // CHECK-RV64-NEXT: entry:
1755 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv16i8.i64.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1756 // CHECK-RV64-NEXT: ret void
1758 void test_sf_vc_ivv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
1759 __riscv_sf_vc_ivv_se_u8m2(p27_26, vd, vs2, simm5, vl);
1762 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8m4(
1763 // CHECK-RV32-NEXT: entry:
1764 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv32i8.i32.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1765 // CHECK-RV32-NEXT: ret void
1767 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8m4(
1768 // CHECK-RV64-NEXT: entry:
1769 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv32i8.i64.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1770 // CHECK-RV64-NEXT: ret void
1772 void test_sf_vc_ivv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
1773 __riscv_sf_vc_ivv_se_u8m4(p27_26, vd, vs2, simm5, vl);
1776 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u8m8(
1777 // CHECK-RV32-NEXT: entry:
1778 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv64i8.i32.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1779 // CHECK-RV32-NEXT: ret void
1781 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u8m8(
1782 // CHECK-RV64-NEXT: entry:
1783 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv64i8.i64.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1784 // CHECK-RV64-NEXT: ret void
1786 void test_sf_vc_ivv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
1787 __riscv_sf_vc_ivv_se_u8m8(p27_26, vd, vs2, simm5, vl);
1790 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16mf4(
1791 // CHECK-RV32-NEXT: entry:
1792 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i16.i32.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1793 // CHECK-RV32-NEXT: ret void
1795 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16mf4(
1796 // CHECK-RV64-NEXT: entry:
1797 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv1i16.i64.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1798 // CHECK-RV64-NEXT: ret void
1800 void test_sf_vc_ivv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
1801 __riscv_sf_vc_ivv_se_u16mf4(p27_26, vd, vs2, simm5, vl);
1804 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16mf2(
1805 // CHECK-RV32-NEXT: entry:
1806 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i16.i32.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1807 // CHECK-RV32-NEXT: ret void
1809 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16mf2(
1810 // CHECK-RV64-NEXT: entry:
1811 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv2i16.i64.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1812 // CHECK-RV64-NEXT: ret void
1814 void test_sf_vc_ivv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
1815 __riscv_sf_vc_ivv_se_u16mf2(p27_26, vd, vs2, simm5, vl);
1818 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16m1(
1819 // CHECK-RV32-NEXT: entry:
1820 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i16.i32.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1821 // CHECK-RV32-NEXT: ret void
1823 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16m1(
1824 // CHECK-RV64-NEXT: entry:
1825 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv4i16.i64.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1826 // CHECK-RV64-NEXT: ret void
1828 void test_sf_vc_ivv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) {
1829 __riscv_sf_vc_ivv_se_u16m1(p27_26, vd, vs2, simm5, vl);
1832 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16m2(
1833 // CHECK-RV32-NEXT: entry:
1834 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i16.i32.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1835 // CHECK-RV32-NEXT: ret void
1837 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16m2(
1838 // CHECK-RV64-NEXT: entry:
1839 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv8i16.i64.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1840 // CHECK-RV64-NEXT: ret void
1842 void test_sf_vc_ivv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) {
1843 __riscv_sf_vc_ivv_se_u16m2(p27_26, vd, vs2, simm5, vl);
1846 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16m4(
1847 // CHECK-RV32-NEXT: entry:
1848 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv16i16.i32.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1849 // CHECK-RV32-NEXT: ret void
1851 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16m4(
1852 // CHECK-RV64-NEXT: entry:
1853 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv16i16.i64.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1854 // CHECK-RV64-NEXT: ret void
1856 void test_sf_vc_ivv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) {
1857 __riscv_sf_vc_ivv_se_u16m4(p27_26, vd, vs2, simm5, vl);
1860 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u16m8(
1861 // CHECK-RV32-NEXT: entry:
1862 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv32i16.i32.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1863 // CHECK-RV32-NEXT: ret void
1865 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u16m8(
1866 // CHECK-RV64-NEXT: entry:
1867 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv32i16.i64.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1868 // CHECK-RV64-NEXT: ret void
1870 void test_sf_vc_ivv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) {
1871 __riscv_sf_vc_ivv_se_u16m8(p27_26, vd, vs2, simm5, vl);
1874 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u32mf2(
1875 // CHECK-RV32-NEXT: entry:
1876 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1877 // CHECK-RV32-NEXT: ret void
1879 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u32mf2(
1880 // CHECK-RV64-NEXT: entry:
1881 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv1i32.i64.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1882 // CHECK-RV64-NEXT: ret void
1884 void test_sf_vc_ivv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
1885 __riscv_sf_vc_ivv_se_u32mf2(p27_26, vd, vs2, simm5, vl);
1888 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u32m1(
1889 // CHECK-RV32-NEXT: entry:
1890 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1891 // CHECK-RV32-NEXT: ret void
1893 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u32m1(
1894 // CHECK-RV64-NEXT: entry:
1895 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv2i32.i64.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1896 // CHECK-RV64-NEXT: ret void
1898 void test_sf_vc_ivv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
1899 __riscv_sf_vc_ivv_se_u32m1(p27_26, vd, vs2, simm5, vl);
1902 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u32m2(
1903 // CHECK-RV32-NEXT: entry:
1904 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1905 // CHECK-RV32-NEXT: ret void
1907 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u32m2(
1908 // CHECK-RV64-NEXT: entry:
1909 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv4i32.i64.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1910 // CHECK-RV64-NEXT: ret void
1912 void test_sf_vc_ivv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
1913 __riscv_sf_vc_ivv_se_u32m2(p27_26, vd, vs2, simm5, vl);
1914 }
1916 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u32m4(
1917 // CHECK-RV32-NEXT: entry:
1918 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1919 // CHECK-RV32-NEXT: ret void
1921 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u32m4(
1922 // CHECK-RV64-NEXT: entry:
1923 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv8i32.i64.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1924 // CHECK-RV64-NEXT: ret void
1926 void test_sf_vc_ivv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
1927 __riscv_sf_vc_ivv_se_u32m4(p27_26, vd, vs2, simm5, vl);
1928 }
1930 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u32m8(
1931 // CHECK-RV32-NEXT: entry:
1932 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv16i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1933 // CHECK-RV32-NEXT: ret void
1935 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u32m8(
1936 // CHECK-RV64-NEXT: entry:
1937 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv16i32.i64.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1938 // CHECK-RV64-NEXT: ret void
1940 void test_sf_vc_ivv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
1941 __riscv_sf_vc_ivv_se_u32m8(p27_26, vd, vs2, simm5, vl);
1942 }
1944 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u64m1(
1945 // CHECK-RV32-NEXT: entry:
1946 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv1i64.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1947 // CHECK-RV32-NEXT: ret void
1949 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u64m1(
1950 // CHECK-RV64-NEXT: entry:
1951 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv1i64.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1952 // CHECK-RV64-NEXT: ret void
1954 void test_sf_vc_ivv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) {
1955 __riscv_sf_vc_ivv_se_u64m1(p27_26, vd, vs2, simm5, vl);
1956 }
1958 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u64m2(
1959 // CHECK-RV32-NEXT: entry:
1960 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv2i64.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1961 // CHECK-RV32-NEXT: ret void
1963 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u64m2(
1964 // CHECK-RV64-NEXT: entry:
1965 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv2i64.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1966 // CHECK-RV64-NEXT: ret void
1968 void test_sf_vc_ivv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) {
1969 __riscv_sf_vc_ivv_se_u64m2(p27_26, vd, vs2, simm5, vl);
1970 }
1972 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u64m4(
1973 // CHECK-RV32-NEXT: entry:
1974 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv4i64.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1975 // CHECK-RV32-NEXT: ret void
1977 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u64m4(
1978 // CHECK-RV64-NEXT: entry:
1979 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv4i64.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1980 // CHECK-RV64-NEXT: ret void
1982 void test_sf_vc_ivv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) {
1983 __riscv_sf_vc_ivv_se_u64m4(p27_26, vd, vs2, simm5, vl);
1984 }
1986 // CHECK-RV32-LABEL: @test_sf_vc_ivv_se_u64m8(
1987 // CHECK-RV32-NEXT: entry:
1988 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i32.nxv8i64.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1989 // CHECK-RV32-NEXT: ret void
1991 // CHECK-RV64-LABEL: @test_sf_vc_ivv_se_u64m8(
1992 // CHECK-RV64-NEXT: entry:
1993 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivv.se.i64.nxv8i64.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1994 // CHECK-RV64-NEXT: ret void
1996 void test_sf_vc_ivv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) {
1997 __riscv_sf_vc_ivv_se_u64m8(p27_26, vd, vs2, simm5, vl);
1998 }
2000 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8mf8(
2001 // CHECK-RV32-NEXT: entry:
2002 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.i32.i32.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2003 // CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
2005 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8mf8(
2006 // CHECK-RV64-NEXT: entry:
2007 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.i64.i64.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2008 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
2010 vuint8mf8_t test_sf_vc_v_ivv_se_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
2011 return __riscv_sf_vc_v_ivv_se_u8mf8(p27_26, vd, vs2, simm5, vl);
2012 }
2014 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8mf4(
2015 // CHECK-RV32-NEXT: entry:
2016 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.i32.i32.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2017 // CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
2019 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8mf4(
2020 // CHECK-RV64-NEXT: entry:
2021 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.i64.i64.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2022 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
2024 vuint8mf4_t test_sf_vc_v_ivv_se_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) {
2025 return __riscv_sf_vc_v_ivv_se_u8mf4(p27_26, vd, vs2, simm5, vl);
2026 }
2028 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8mf2(
2029 // CHECK-RV32-NEXT: entry:
2030 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.i32.i32.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2031 // CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
2033 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8mf2(
2034 // CHECK-RV64-NEXT: entry:
2035 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.i64.i64.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2036 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
2038 vuint8mf2_t test_sf_vc_v_ivv_se_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) {
2039 return __riscv_sf_vc_v_ivv_se_u8mf2(p27_26, vd, vs2, simm5, vl);
2040 }
2042 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8m1(
2043 // CHECK-RV32-NEXT: entry:
2044 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.i32.i32.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2045 // CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
2047 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8m1(
2048 // CHECK-RV64-NEXT: entry:
2049 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.i64.i64.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2050 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
2052 vuint8m1_t test_sf_vc_v_ivv_se_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
2053 return __riscv_sf_vc_v_ivv_se_u8m1(p27_26, vd, vs2, simm5, vl);
2054 }
2056 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8m2(
2057 // CHECK-RV32-NEXT: entry:
2058 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.i32.i32.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2059 // CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
2061 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8m2(
2062 // CHECK-RV64-NEXT: entry:
2063 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.i64.i64.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2064 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
2066 vuint8m2_t test_sf_vc_v_ivv_se_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
2067 return __riscv_sf_vc_v_ivv_se_u8m2(p27_26, vd, vs2, simm5, vl);
2068 }
2070 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8m4(
2071 // CHECK-RV32-NEXT: entry:
2072 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.i32.i32.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2073 // CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
2075 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8m4(
2076 // CHECK-RV64-NEXT: entry:
2077 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.i64.i64.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2078 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
2080 vuint8m4_t test_sf_vc_v_ivv_se_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
2081 return __riscv_sf_vc_v_ivv_se_u8m4(p27_26, vd, vs2, simm5, vl);
2082 }
2084 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u8m8(
2085 // CHECK-RV32-NEXT: entry:
2086 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.i32.i32.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2087 // CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
2089 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u8m8(
2090 // CHECK-RV64-NEXT: entry:
2091 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.i64.i64.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2092 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
2094 vuint8m8_t test_sf_vc_v_ivv_se_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
2095 return __riscv_sf_vc_v_ivv_se_u8m8(p27_26, vd, vs2, simm5, vl);
2096 }
2098 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16mf4(
2099 // CHECK-RV32-NEXT: entry:
2100 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.i32.i32.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2101 // CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
2103 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16mf4(
2104 // CHECK-RV64-NEXT: entry:
2105 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.i64.i64.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2106 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
2108 vuint16mf4_t test_sf_vc_v_ivv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
2109 return __riscv_sf_vc_v_ivv_se_u16mf4(p27_26, vd, vs2, simm5, vl);
2110 }
2112 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16mf2(
2113 // CHECK-RV32-NEXT: entry:
2114 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.i32.i32.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2115 // CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
2117 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16mf2(
2118 // CHECK-RV64-NEXT: entry:
2119 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.i64.i64.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2120 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
2122 vuint16mf2_t test_sf_vc_v_ivv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
2123 return __riscv_sf_vc_v_ivv_se_u16mf2(p27_26, vd, vs2, simm5, vl);
2124 }
2126 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16m1(
2127 // CHECK-RV32-NEXT: entry:
2128 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.i32.i32.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2129 // CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
2131 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16m1(
2132 // CHECK-RV64-NEXT: entry:
2133 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.i64.i64.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2134 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
2136 vuint16m1_t test_sf_vc_v_ivv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) {
2137 return __riscv_sf_vc_v_ivv_se_u16m1(p27_26, vd, vs2, simm5, vl);
2138 }
2140 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16m2(
2141 // CHECK-RV32-NEXT: entry:
2142 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.i32.i32.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2143 // CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
2145 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16m2(
2146 // CHECK-RV64-NEXT: entry:
2147 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.i64.i64.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2148 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
2150 vuint16m2_t test_sf_vc_v_ivv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) {
2151 return __riscv_sf_vc_v_ivv_se_u16m2(p27_26, vd, vs2, simm5, vl);
2152 }
2154 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16m4(
2155 // CHECK-RV32-NEXT: entry:
2156 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.i32.i32.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2157 // CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
2159 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16m4(
2160 // CHECK-RV64-NEXT: entry:
2161 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.i64.i64.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2162 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
2164 vuint16m4_t test_sf_vc_v_ivv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) {
2165 return __riscv_sf_vc_v_ivv_se_u16m4(p27_26, vd, vs2, simm5, vl);
2166 }
2168 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u16m8(
2169 // CHECK-RV32-NEXT: entry:
2170 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.i32.i32.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2171 // CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
2173 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u16m8(
2174 // CHECK-RV64-NEXT: entry:
2175 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.i64.i64.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2176 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
2178 vuint16m8_t test_sf_vc_v_ivv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) {
2179 return __riscv_sf_vc_v_ivv_se_u16m8(p27_26, vd, vs2, simm5, vl);
2180 }
2182 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u32mf2(
2183 // CHECK-RV32-NEXT: entry:
2184 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2185 // CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
2187 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u32mf2(
2188 // CHECK-RV64-NEXT: entry:
2189 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.i64.i64.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2190 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
2192 vuint32mf2_t test_sf_vc_v_ivv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
2193 return __riscv_sf_vc_v_ivv_se_u32mf2(p27_26, vd, vs2, simm5, vl);
2194 }
2196 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u32m1(
2197 // CHECK-RV32-NEXT: entry:
2198 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2199 // CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
2201 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u32m1(
2202 // CHECK-RV64-NEXT: entry:
2203 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.i64.i64.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2204 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
2206 vuint32m1_t test_sf_vc_v_ivv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
2207 return __riscv_sf_vc_v_ivv_se_u32m1(p27_26, vd, vs2, simm5, vl);
2208 }
2210 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u32m2(
2211 // CHECK-RV32-NEXT: entry:
2212 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2213 // CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
2215 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u32m2(
2216 // CHECK-RV64-NEXT: entry:
2217 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.i64.i64.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2218 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
2220 vuint32m2_t test_sf_vc_v_ivv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
2221 return __riscv_sf_vc_v_ivv_se_u32m2(p27_26, vd, vs2, simm5, vl);
2222 }
2224 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u32m4(
2225 // CHECK-RV32-NEXT: entry:
2226 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2227 // CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
2229 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u32m4(
2230 // CHECK-RV64-NEXT: entry:
2231 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.i64.i64.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2232 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
2234 vuint32m4_t test_sf_vc_v_ivv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
2235 return __riscv_sf_vc_v_ivv_se_u32m4(p27_26, vd, vs2, simm5, vl);
2236 }
2238 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u32m8(
2239 // CHECK-RV32-NEXT: entry:
2240 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2241 // CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
2243 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u32m8(
2244 // CHECK-RV64-NEXT: entry:
2245 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.i64.i64.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2246 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
2248 vuint32m8_t test_sf_vc_v_ivv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
2249 return __riscv_sf_vc_v_ivv_se_u32m8(p27_26, vd, vs2, simm5, vl);
2250 }
2252 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u64m1(
2253 // CHECK-RV32-NEXT: entry:
2254 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2255 // CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
2257 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u64m1(
2258 // CHECK-RV64-NEXT: entry:
2259 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.i64.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2260 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
2262 vuint64m1_t test_sf_vc_v_ivv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) {
2263 return __riscv_sf_vc_v_ivv_se_u64m1(p27_26, vd, vs2, simm5, vl);
2264 }
2266 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u64m2(
2267 // CHECK-RV32-NEXT: entry:
2268 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2269 // CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
2271 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u64m2(
2272 // CHECK-RV64-NEXT: entry:
2273 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.i64.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2274 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
2276 vuint64m2_t test_sf_vc_v_ivv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) {
2277 return __riscv_sf_vc_v_ivv_se_u64m2(p27_26, vd, vs2, simm5, vl);
2278 }
2280 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u64m4(
2281 // CHECK-RV32-NEXT: entry:
2282 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2283 // CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
2285 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u64m4(
2286 // CHECK-RV64-NEXT: entry:
2287 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.i64.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2288 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
2290 vuint64m4_t test_sf_vc_v_ivv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) {
2291 return __riscv_sf_vc_v_ivv_se_u64m4(p27_26, vd, vs2, simm5, vl);
2292 }
2294 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_se_u64m8(
2295 // CHECK-RV32-NEXT: entry:
2296 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2297 // CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
2299 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_se_u64m8(
2300 // CHECK-RV64-NEXT: entry:
2301 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.i64.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2302 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
2304 vuint64m8_t test_sf_vc_v_ivv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) {
2305 return __riscv_sf_vc_v_ivv_se_u64m8(p27_26, vd, vs2, simm5, vl);
2306 }
2308 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8mf8(
2309 // CHECK-RV32-NEXT: entry:
2310 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.nxv1i8.i32.i32.i32(i32 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2311 // CHECK-RV32-NEXT: ret <vscale x 1 x i8> [[TMP0]]
2313 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8mf8(
2314 // CHECK-RV64-NEXT: entry:
2315 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.nxv1i8.i64.i64.i64(i64 3, <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2316 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
2318 vuint8mf8_t test_sf_vc_v_ivv_u8mf8(vuint8mf8_t vd, vuint8mf8_t vs2, size_t vl) {
2319 return __riscv_sf_vc_v_ivv_u8mf8(p27_26, vd, vs2, simm5, vl);
2320 }
2322 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8mf4(
2323 // CHECK-RV32-NEXT: entry:
2324 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.nxv2i8.i32.i32.i32(i32 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2325 // CHECK-RV32-NEXT: ret <vscale x 2 x i8> [[TMP0]]
2327 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8mf4(
2328 // CHECK-RV64-NEXT: entry:
2329 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.nxv2i8.i64.i64.i64(i64 3, <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2330 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
2332 vuint8mf4_t test_sf_vc_v_ivv_u8mf4(vuint8mf4_t vd, vuint8mf4_t vs2, size_t vl) {
2333 return __riscv_sf_vc_v_ivv_u8mf4(p27_26, vd, vs2, simm5, vl);
2334 }
2336 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8mf2(
2337 // CHECK-RV32-NEXT: entry:
2338 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.nxv4i8.i32.i32.i32(i32 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2339 // CHECK-RV32-NEXT: ret <vscale x 4 x i8> [[TMP0]]
2341 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8mf2(
2342 // CHECK-RV64-NEXT: entry:
2343 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.nxv4i8.i64.i64.i64(i64 3, <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2344 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
2346 vuint8mf2_t test_sf_vc_v_ivv_u8mf2(vuint8mf2_t vd, vuint8mf2_t vs2, size_t vl) {
2347 return __riscv_sf_vc_v_ivv_u8mf2(p27_26, vd, vs2, simm5, vl);
2348 }
2350 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8m1(
2351 // CHECK-RV32-NEXT: entry:
2352 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.nxv8i8.i32.i32.i32(i32 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2353 // CHECK-RV32-NEXT: ret <vscale x 8 x i8> [[TMP0]]
2355 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8m1(
2356 // CHECK-RV64-NEXT: entry:
2357 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.nxv8i8.i64.i64.i64(i64 3, <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2358 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
2360 vuint8m1_t test_sf_vc_v_ivv_u8m1(vuint8m1_t vd, vuint8m1_t vs2, size_t vl) {
2361 return __riscv_sf_vc_v_ivv_u8m1(p27_26, vd, vs2, simm5, vl);
2362 }
2364 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8m2(
2365 // CHECK-RV32-NEXT: entry:
2366 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.nxv16i8.i32.i32.i32(i32 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2367 // CHECK-RV32-NEXT: ret <vscale x 16 x i8> [[TMP0]]
2369 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8m2(
2370 // CHECK-RV64-NEXT: entry:
2371 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.nxv16i8.i64.i64.i64(i64 3, <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2372 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
2374 vuint8m2_t test_sf_vc_v_ivv_u8m2(vuint8m2_t vd, vuint8m2_t vs2, size_t vl) {
2375 return __riscv_sf_vc_v_ivv_u8m2(p27_26, vd, vs2, simm5, vl);
2376 }
2378 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8m4(
2379 // CHECK-RV32-NEXT: entry:
2380 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.nxv32i8.i32.i32.i32(i32 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2381 // CHECK-RV32-NEXT: ret <vscale x 32 x i8> [[TMP0]]
2383 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8m4(
2384 // CHECK-RV64-NEXT: entry:
2385 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.nxv32i8.i64.i64.i64(i64 3, <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2386 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
2388 vuint8m4_t test_sf_vc_v_ivv_u8m4(vuint8m4_t vd, vuint8m4_t vs2, size_t vl) {
2389 return __riscv_sf_vc_v_ivv_u8m4(p27_26, vd, vs2, simm5, vl);
2390 }
2392 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u8m8(
2393 // CHECK-RV32-NEXT: entry:
2394 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.nxv64i8.i32.i32.i32(i32 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2395 // CHECK-RV32-NEXT: ret <vscale x 64 x i8> [[TMP0]]
2397 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u8m8(
2398 // CHECK-RV64-NEXT: entry:
2399 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.nxv64i8.i64.i64.i64(i64 3, <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2400 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
2402 vuint8m8_t test_sf_vc_v_ivv_u8m8(vuint8m8_t vd, vuint8m8_t vs2, size_t vl) {
2403 return __riscv_sf_vc_v_ivv_u8m8(p27_26, vd, vs2, simm5, vl);
2404 }
2406 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16mf4(
2407 // CHECK-RV32-NEXT: entry:
2408 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.nxv1i16.i32.i32.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2409 // CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
2411 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16mf4(
2412 // CHECK-RV64-NEXT: entry:
2413 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.nxv1i16.i64.i64.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2414 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
2416 vuint16mf4_t test_sf_vc_v_ivv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, size_t vl) {
2417 return __riscv_sf_vc_v_ivv_u16mf4(p27_26, vd, vs2, simm5, vl);
2418 }
2420 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16mf2(
2421 // CHECK-RV32-NEXT: entry:
2422 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.nxv2i16.i32.i32.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2423 // CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
2425 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16mf2(
2426 // CHECK-RV64-NEXT: entry:
2427 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.nxv2i16.i64.i64.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2428 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
2430 vuint16mf2_t test_sf_vc_v_ivv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, size_t vl) {
2431 return __riscv_sf_vc_v_ivv_u16mf2(p27_26, vd, vs2, simm5, vl);
2432 }
2434 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16m1(
2435 // CHECK-RV32-NEXT: entry:
2436 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.nxv4i16.i32.i32.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2437 // CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
2439 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16m1(
2440 // CHECK-RV64-NEXT: entry:
2441 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.nxv4i16.i64.i64.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2442 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
2444 vuint16m1_t test_sf_vc_v_ivv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, size_t vl) {
2445 return __riscv_sf_vc_v_ivv_u16m1(p27_26, vd, vs2, simm5, vl);
2446 }
2448 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16m2(
2449 // CHECK-RV32-NEXT: entry:
2450 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.nxv8i16.i32.i32.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2451 // CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
2453 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16m2(
2454 // CHECK-RV64-NEXT: entry:
2455 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.nxv8i16.i64.i64.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2456 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
2458 vuint16m2_t test_sf_vc_v_ivv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, size_t vl) {
2459 return __riscv_sf_vc_v_ivv_u16m2(p27_26, vd, vs2, simm5, vl);
2460 }
2462 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16m4(
2463 // CHECK-RV32-NEXT: entry:
2464 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.nxv16i16.i32.i32.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2465 // CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
2467 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16m4(
2468 // CHECK-RV64-NEXT: entry:
2469 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.nxv16i16.i64.i64.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2470 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
2472 vuint16m4_t test_sf_vc_v_ivv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, size_t vl) {
2473 return __riscv_sf_vc_v_ivv_u16m4(p27_26, vd, vs2, simm5, vl);
2474 }
2476 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u16m8(
2477 // CHECK-RV32-NEXT: entry:
2478 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.nxv32i16.i32.i32.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2479 // CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
2481 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u16m8(
2482 // CHECK-RV64-NEXT: entry:
2483 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.nxv32i16.i64.i64.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2484 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
2486 vuint16m8_t test_sf_vc_v_ivv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, size_t vl) {
2487 return __riscv_sf_vc_v_ivv_u16m8(p27_26, vd, vs2, simm5, vl);
2488 }
2490 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u32mf2(
2491 // CHECK-RV32-NEXT: entry:
2492 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.nxv1i32.i32.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2493 // CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
2495 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u32mf2(
2496 // CHECK-RV64-NEXT: entry:
2497 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.nxv1i32.i64.i64.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2498 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
2500 vuint32mf2_t test_sf_vc_v_ivv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
2501 return __riscv_sf_vc_v_ivv_u32mf2(p27_26, vd, vs2, simm5, vl);
2502 }
2504 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u32m1(
2505 // CHECK-RV32-NEXT: entry:
2506 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.nxv2i32.i32.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2507 // CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
2509 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u32m1(
2510 // CHECK-RV64-NEXT: entry:
2511 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.nxv2i32.i64.i64.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2512 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
2514 vuint32m1_t test_sf_vc_v_ivv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
2515 return __riscv_sf_vc_v_ivv_u32m1(p27_26, vd, vs2, simm5, vl);
2516 }
2518 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u32m2(
2519 // CHECK-RV32-NEXT: entry:
2520 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.nxv4i32.i32.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2521 // CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
2523 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u32m2(
2524 // CHECK-RV64-NEXT: entry:
2525 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.nxv4i32.i64.i64.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2526 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
2528 vuint32m2_t test_sf_vc_v_ivv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
2529 return __riscv_sf_vc_v_ivv_u32m2(p27_26, vd, vs2, simm5, vl);
2530 }
2532 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u32m4(
2533 // CHECK-RV32-NEXT: entry:
2534 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.nxv8i32.i32.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2535 // CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
2537 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u32m4(
2538 // CHECK-RV64-NEXT: entry:
2539 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.nxv8i32.i64.i64.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2540 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
2542 vuint32m4_t test_sf_vc_v_ivv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
2543 return __riscv_sf_vc_v_ivv_u32m4(p27_26, vd, vs2, simm5, vl);
2544 }
2546 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u32m8(
2547 // CHECK-RV32-NEXT: entry:
2548 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.nxv16i32.i32.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2549 // CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
2551 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u32m8(
2552 // CHECK-RV64-NEXT: entry:
2553 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.nxv16i32.i64.i64.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2554 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
2556 vuint32m8_t test_sf_vc_v_ivv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
2557 return __riscv_sf_vc_v_ivv_u32m8(p27_26, vd, vs2, simm5, vl);
2558 }
2560 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u64m1(
2561 // CHECK-RV32-NEXT: entry:
2562 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.nxv1i64.i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2563 // CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
2565 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u64m1(
2566 // CHECK-RV64-NEXT: entry:
2567 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.nxv1i64.i64.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2568 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
2570 vuint64m1_t test_sf_vc_v_ivv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, size_t vl) {
2571 return __riscv_sf_vc_v_ivv_u64m1(p27_26, vd, vs2, simm5, vl);
2572 }
2574 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u64m2(
2575 // CHECK-RV32-NEXT: entry:
2576 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.nxv2i64.i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2577 // CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
2579 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u64m2(
2580 // CHECK-RV64-NEXT: entry:
2581 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.nxv2i64.i64.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2582 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
2584 vuint64m2_t test_sf_vc_v_ivv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, size_t vl) {
2585 return __riscv_sf_vc_v_ivv_u64m2(p27_26, vd, vs2, simm5, vl);
2586 }
2588 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u64m4(
2589 // CHECK-RV32-NEXT: entry:
2590 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.nxv4i64.i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2591 // CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
2593 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u64m4(
2594 // CHECK-RV64-NEXT: entry:
2595 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.nxv4i64.i64.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2596 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
2598 vuint64m4_t test_sf_vc_v_ivv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, size_t vl) {
2599 return __riscv_sf_vc_v_ivv_u64m4(p27_26, vd, vs2, simm5, vl);
2600 }
2602 // CHECK-RV32-LABEL: @test_sf_vc_v_ivv_u64m8(
2603 // CHECK-RV32-NEXT: entry:
2604 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
2605 // CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
2607 // CHECK-RV64-LABEL: @test_sf_vc_v_ivv_u64m8(
2608 // CHECK-RV64-NEXT: entry:
2609 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.i64.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
2610 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
2612 vuint64m8_t test_sf_vc_v_ivv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, size_t vl) {
2613 return __riscv_sf_vc_v_ivv_u64m8(p27_26, vd, vs2, simm5, vl);
2614 }
2616 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16mf4(
2617 // CHECK-RV32-NEXT: entry:
2618 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv1i16.f16.i32(i32 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
2619 // CHECK-RV32-NEXT: ret void
2621 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16mf4(
2622 // CHECK-RV64-NEXT: entry:
2623 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv1i16.f16.i64(i64 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
2624 // CHECK-RV64-NEXT: ret void
2626 void test_sf_vc_fvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, _Float16 fs1, size_t vl) {
2627 __riscv_sf_vc_fvv_se_u16mf4(p26, vd, vs2, fs1, vl);
2628 }
2630 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16mf2(
2631 // CHECK-RV32-NEXT: entry:
2632 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv2i16.f16.i32(i32 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
2633 // CHECK-RV32-NEXT: ret void
2635 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16mf2(
2636 // CHECK-RV64-NEXT: entry:
2637 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv2i16.f16.i64(i64 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
2638 // CHECK-RV64-NEXT: ret void
2640 void test_sf_vc_fvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
2641 __riscv_sf_vc_fvv_se_u16mf2(p26, vd, vs2, fs1, vl);
2642 }
2644 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16m1(
2645 // CHECK-RV32-NEXT: entry:
2646 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv4i16.f16.i32(i32 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
2647 // CHECK-RV32-NEXT: ret void
2649 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16m1(
2650 // CHECK-RV64-NEXT: entry:
2651 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv4i16.f16.i64(i64 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
2652 // CHECK-RV64-NEXT: ret void
2654 void test_sf_vc_fvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, _Float16 fs1, size_t vl) {
2655 __riscv_sf_vc_fvv_se_u16m1(p26, vd, vs2, fs1, vl);
2656 }
2658 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16m2(
2659 // CHECK-RV32-NEXT: entry:
2660 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv8i16.f16.i32(i32 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
2661 // CHECK-RV32-NEXT: ret void
2663 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16m2(
2664 // CHECK-RV64-NEXT: entry:
2665 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv8i16.f16.i64(i64 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
2666 // CHECK-RV64-NEXT: ret void
2668 void test_sf_vc_fvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, _Float16 fs1, size_t vl) {
2669 __riscv_sf_vc_fvv_se_u16m2(p26, vd, vs2, fs1, vl);
2670 }
2672 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16m4(
2673 // CHECK-RV32-NEXT: entry:
2674 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv16i16.f16.i32(i32 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
2675 // CHECK-RV32-NEXT: ret void
2677 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16m4(
2678 // CHECK-RV64-NEXT: entry:
2679 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv16i16.f16.i64(i64 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
2680 // CHECK-RV64-NEXT: ret void
2682 void test_sf_vc_fvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, _Float16 fs1, size_t vl) {
2683 __riscv_sf_vc_fvv_se_u16m4(p26, vd, vs2, fs1, vl);
2684 }
2686 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u16m8(
2687 // CHECK-RV32-NEXT: entry:
2688 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv32i16.f16.i32(i32 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
2689 // CHECK-RV32-NEXT: ret void
2691 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u16m8(
2692 // CHECK-RV64-NEXT: entry:
2693 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv32i16.f16.i64(i64 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
2694 // CHECK-RV64-NEXT: ret void
2696 void test_sf_vc_fvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, _Float16 fs1, size_t vl) {
2697 __riscv_sf_vc_fvv_se_u16m8(p26, vd, vs2, fs1, vl);
2698 }
2700 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u32mf2(
2701 // CHECK-RV32-NEXT: entry:
2702 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv1i32.f32.i32(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
2703 // CHECK-RV32-NEXT: ret void
2705 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u32mf2(
2706 // CHECK-RV64-NEXT: entry:
2707 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv1i32.f32.i64(i64 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
2708 // CHECK-RV64-NEXT: ret void
2710 void test_sf_vc_fvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, float fs1, size_t vl) {
2711 __riscv_sf_vc_fvv_se_u32mf2(p26, vd, vs2, fs1, vl);
2712 }
2714 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u32m1(
2715 // CHECK-RV32-NEXT: entry:
2716 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv2i32.f32.i32(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
2717 // CHECK-RV32-NEXT: ret void
2719 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u32m1(
2720 // CHECK-RV64-NEXT: entry:
2721 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv2i32.f32.i64(i64 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
2722 // CHECK-RV64-NEXT: ret void
2724 void test_sf_vc_fvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, float fs1, size_t vl) {
2725 __riscv_sf_vc_fvv_se_u32m1(p26, vd, vs2, fs1, vl);
2726 }
2728 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u32m2(
2729 // CHECK-RV32-NEXT: entry:
2730 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv4i32.f32.i32(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
2731 // CHECK-RV32-NEXT: ret void
2733 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u32m2(
2734 // CHECK-RV64-NEXT: entry:
2735 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv4i32.f32.i64(i64 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
2736 // CHECK-RV64-NEXT: ret void
2738 void test_sf_vc_fvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, float fs1, size_t vl) {
2739 __riscv_sf_vc_fvv_se_u32m2(p26, vd, vs2, fs1, vl);
2740 }
2742 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u32m4(
2743 // CHECK-RV32-NEXT: entry:
2744 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv8i32.f32.i32(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
2745 // CHECK-RV32-NEXT: ret void
2747 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u32m4(
2748 // CHECK-RV64-NEXT: entry:
2749 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv8i32.f32.i64(i64 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
2750 // CHECK-RV64-NEXT: ret void
2752 void test_sf_vc_fvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, float fs1, size_t vl) {
2753 __riscv_sf_vc_fvv_se_u32m4(p26, vd, vs2, fs1, vl);
2754 }
2756 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u32m8(
2757 // CHECK-RV32-NEXT: entry:
2758 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv16i32.f32.i32(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
2759 // CHECK-RV32-NEXT: ret void
2761 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u32m8(
2762 // CHECK-RV64-NEXT: entry:
2763 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv16i32.f32.i64(i64 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
2764 // CHECK-RV64-NEXT: ret void
2766 void test_sf_vc_fvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, float fs1, size_t vl) {
2767 __riscv_sf_vc_fvv_se_u32m8(p26, vd, vs2, fs1, vl);
2768 }
2770 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u64m1(
2771 // CHECK-RV32-NEXT: entry:
2772 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv1i64.f64.i32(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
2773 // CHECK-RV32-NEXT: ret void
2775 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u64m1(
2776 // CHECK-RV64-NEXT: entry:
2777 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv1i64.f64.i64(i64 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
2778 // CHECK-RV64-NEXT: ret void
2780 void test_sf_vc_fvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, double fs1, size_t vl) {
2781 __riscv_sf_vc_fvv_se_u64m1(p26, vd, vs2, fs1, vl);
2782 }
2784 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u64m2(
2785 // CHECK-RV32-NEXT: entry:
2786 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv2i64.f64.i32(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
2787 // CHECK-RV32-NEXT: ret void
2789 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u64m2(
2790 // CHECK-RV64-NEXT: entry:
2791 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv2i64.f64.i64(i64 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
2792 // CHECK-RV64-NEXT: ret void
2794 void test_sf_vc_fvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, double fs1, size_t vl) {
2795 __riscv_sf_vc_fvv_se_u64m2(p26, vd, vs2, fs1, vl);
2796 }
2798 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u64m4(
2799 // CHECK-RV32-NEXT: entry:
2800 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv4i64.f64.i32(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
2801 // CHECK-RV32-NEXT: ret void
2803 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u64m4(
2804 // CHECK-RV64-NEXT: entry:
2805 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv4i64.f64.i64(i64 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
2806 // CHECK-RV64-NEXT: ret void
2808 void test_sf_vc_fvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, double fs1, size_t vl) {
2809 __riscv_sf_vc_fvv_se_u64m4(p26, vd, vs2, fs1, vl);
2810 }
2812 // CHECK-RV32-LABEL: @test_sf_vc_fvv_se_u64m8(
2813 // CHECK-RV32-NEXT: entry:
2814 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i32.nxv8i64.f64.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
2815 // CHECK-RV32-NEXT: ret void
2817 // CHECK-RV64-LABEL: @test_sf_vc_fvv_se_u64m8(
2818 // CHECK-RV64-NEXT: entry:
2819 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvv.se.i64.nxv8i64.f64.i64(i64 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
2820 // CHECK-RV64-NEXT: ret void
2822 void test_sf_vc_fvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, double fs1, size_t vl) {
2823 __riscv_sf_vc_fvv_se_u64m8(p26, vd, vs2, fs1, vl);
2824 }
2826 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16mf4(
2827 // CHECK-RV32-NEXT: entry:
2828 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.i32.f16.i32(i32 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
2829 // CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
2831 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16mf4(
2832 // CHECK-RV64-NEXT: entry:
2833 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.i64.f16.i64(i64 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
2834 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
2836 vuint16mf4_t test_sf_vc_v_fvv_se_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, _Float16 fs1, size_t vl) {
2837 return __riscv_sf_vc_v_fvv_se_u16mf4(p26, vd, vs2, fs1, vl);
2838 }
2840 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16mf2(
2841 // CHECK-RV32-NEXT: entry:
2842 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.i32.f16.i32(i32 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
2843 // CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
2845 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16mf2(
2846 // CHECK-RV64-NEXT: entry:
2847 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.i64.f16.i64(i64 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
2848 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
2850 vuint16mf2_t test_sf_vc_v_fvv_se_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
2851 return __riscv_sf_vc_v_fvv_se_u16mf2(p26, vd, vs2, fs1, vl);
2852 }
2854 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16m1(
2855 // CHECK-RV32-NEXT: entry:
2856 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.i32.f16.i32(i32 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
2857 // CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
2859 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16m1(
2860 // CHECK-RV64-NEXT: entry:
2861 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.i64.f16.i64(i64 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
2862 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
2864 vuint16m1_t test_sf_vc_v_fvv_se_u16m1(vuint16m1_t vd, vuint16m1_t vs2, _Float16 fs1, size_t vl) {
2865 return __riscv_sf_vc_v_fvv_se_u16m1(p26, vd, vs2, fs1, vl);
2866 }
2868 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16m2(
2869 // CHECK-RV32-NEXT: entry:
2870 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.i32.f16.i32(i32 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
2871 // CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
2873 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16m2(
2874 // CHECK-RV64-NEXT: entry:
2875 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.i64.f16.i64(i64 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
2876 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
2878 vuint16m2_t test_sf_vc_v_fvv_se_u16m2(vuint16m2_t vd, vuint16m2_t vs2, _Float16 fs1, size_t vl) {
2879 return __riscv_sf_vc_v_fvv_se_u16m2(p26, vd, vs2, fs1, vl);
2880 }
2882 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16m4(
2883 // CHECK-RV32-NEXT: entry:
2884 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.i32.f16.i32(i32 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
2885 // CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
2887 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16m4(
2888 // CHECK-RV64-NEXT: entry:
2889 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.i64.f16.i64(i64 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
2890 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
2892 vuint16m4_t test_sf_vc_v_fvv_se_u16m4(vuint16m4_t vd, vuint16m4_t vs2, _Float16 fs1, size_t vl) {
2893 return __riscv_sf_vc_v_fvv_se_u16m4(p26, vd, vs2, fs1, vl);
2896 // CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u16m8(
2897 // CHECK-RV32-NEXT: entry:
2898 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.i32.f16.i32(i32 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
2899 // CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
2901 // CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u16m8(
2902 // CHECK-RV64-NEXT: entry:
2903 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.i64.f16.i64(i64 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
2904 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
2906 vuint16m8_t test_sf_vc_v_fvv_se_u16m8(vuint16m8_t vd, vuint16m8_t vs2, _Float16 fs1, size_t vl) {
2907 return __riscv_sf_vc_v_fvv_se_u16m8(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.i32.f32.i32(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.i64.f32.i64(i64 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_sf_vc_v_fvv_se_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, float fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_se_u32mf2(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.i32.f32.i32(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.i64.f32.i64(i64 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_sf_vc_v_fvv_se_u32m1(vuint32m1_t vd, vuint32m1_t vs2, float fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_se_u32m1(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.i32.f32.i32(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.i64.f32.i64(i64 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_sf_vc_v_fvv_se_u32m2(vuint32m2_t vd, vuint32m2_t vs2, float fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_se_u32m2(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.i32.f32.i32(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.i64.f32.i64(i64 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_sf_vc_v_fvv_se_u32m4(vuint32m4_t vd, vuint32m4_t vs2, float fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_se_u32m4(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.i32.f32.i32(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.i64.f32.i64(i64 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_sf_vc_v_fvv_se_u32m8(vuint32m8_t vd, vuint32m8_t vs2, float fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_se_u32m8(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.i32.f64.i32(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.i64.f64.i64(i64 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_sf_vc_v_fvv_se_u64m1(vuint64m1_t vd, vuint64m1_t vs2, double fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_se_u64m1(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.i32.f64.i32(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.i64.f64.i64(i64 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_sf_vc_v_fvv_se_u64m2(vuint64m2_t vd, vuint64m2_t vs2, double fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_se_u64m2(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.i32.f64.i32(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.i64.f64.i64(i64 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_sf_vc_v_fvv_se_u64m4(vuint64m4_t vd, vuint64m4_t vs2, double fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_se_u64m4(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_se_u64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.i32.f64.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_se_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.i64.f64.i64(i64 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_sf_vc_v_fvv_se_u64m8(vuint64m8_t vd, vuint64m8_t vs2, double fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_se_u64m8(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16mf4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.i32.f16.i32(i32 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.i64.f16.i64(i64 1, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_sf_vc_v_fvv_u16mf4(vuint16mf4_t vd, vuint16mf4_t vs2, _Float16 fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_u16mf4(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.i32.f16.i32(i32 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.i64.f16.i64(i64 1, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_sf_vc_v_fvv_u16mf2(vuint16mf2_t vd, vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_u16mf2(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.i32.f16.i32(i32 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.i64.f16.i64(i64 1, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_sf_vc_v_fvv_u16m1(vuint16m1_t vd, vuint16m1_t vs2, _Float16 fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_u16m1(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.i32.f16.i32(i32 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.i64.f16.i64(i64 1, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_sf_vc_v_fvv_u16m2(vuint16m2_t vd, vuint16m2_t vs2, _Float16 fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_u16m2(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.i32.f16.i32(i32 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.i64.f16.i64(i64 1, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_sf_vc_v_fvv_u16m4(vuint16m4_t vd, vuint16m4_t vs2, _Float16 fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_u16m4(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u16m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.i32.f16.i32(i32 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.i64.f16.i64(i64 1, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_sf_vc_v_fvv_u16m8(vuint16m8_t vd, vuint16m8_t vs2, _Float16 fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_u16m8(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u32mf2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.nxv1i32.i32.f32.i32(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.nxv1i32.i64.f32.i64(i64 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_sf_vc_v_fvv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, float fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_u32mf2(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u32m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.nxv2i32.i32.f32.i32(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.nxv2i32.i64.f32.i64(i64 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_sf_vc_v_fvv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, float fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_u32m1(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u32m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.nxv4i32.i32.f32.i32(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.nxv4i32.i64.f32.i64(i64 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_sf_vc_v_fvv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, float fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_u32m2(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u32m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.nxv8i32.i32.f32.i32(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.nxv8i32.i64.f32.i64(i64 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_sf_vc_v_fvv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, float fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_u32m4(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u32m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.nxv16i32.i32.f32.i32(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.nxv16i32.i64.f32.i64(i64 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_sf_vc_v_fvv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, float fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_u32m8(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u64m1(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.nxv1i64.i32.f64.i32(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.nxv1i64.i64.f64.i64(i64 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_sf_vc_v_fvv_u64m1(vuint64m1_t vd, vuint64m1_t vs2, double fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_u64m1(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u64m2(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.nxv2i64.i32.f64.i32(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.nxv2i64.i64.f64.i64(i64 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_sf_vc_v_fvv_u64m2(vuint64m2_t vd, vuint64m2_t vs2, double fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_u64m2(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u64m4(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.nxv4i64.i32.f64.i32(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.nxv4i64.i64.f64.i64(i64 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_sf_vc_v_fvv_u64m4(vuint64m4_t vd, vuint64m4_t vs2, double fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_u64m4(p26, vd, vs2, fs1, vl);
}

// CHECK-RV32-LABEL: @test_sf_vc_v_fvv_u64m8(
// CHECK-RV32-NEXT: entry:
// CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.nxv8i64.i32.f64.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i32 [[VL:%.*]])
// CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
// CHECK-RV64-LABEL: @test_sf_vc_v_fvv_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.nxv8i64.i64.f64.i64(i64 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], double [[FS1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_sf_vc_v_fvv_u64m8(vuint64m8_t vd, vuint64m8_t vs2, double fs1, size_t vl) {
return __riscv_sf_vc_v_fvv_u64m8(p26, vd, vs2, fs1, vl);
}