Run DCE after a LoopFlatten test to reduce spurious output [nfc]
[llvm-project.git] / clang / test / CodeGen / RISCV / rvv-intrinsics-autogenerated / non-policy / non-overloaded / xsfvcp-xvw.c
blob23ee2b7bb0f4ed66702d6baf1df8ff6a17d34f80
1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
2 // REQUIRES: riscv-registered-target
3 // RUN: %clang_cc1 -triple riscv32 -target-feature +v -target-feature +zfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV32 %s
4 // RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh -target-feature +xsfvcp -disable-O0-optnone -emit-llvm %s -o - | opt -S -passes=mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
6 #include <sifive_vector.h>
8 #define p27_26 (0b11)
9 #define p26 (0b1)
10 #define simm5 (10)
12 // CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u8mf8(
13 // CHECK-RV32-NEXT: entry:
14 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i32.nxv1i16.nxv1i8.nxv1i8.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
15 // CHECK-RV32-NEXT: ret void
17 // CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u8mf8(
18 // CHECK-RV64-NEXT: entry:
19 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i64.nxv1i16.nxv1i8.nxv1i8.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
20 // CHECK-RV64-NEXT: ret void
22 void test_sf_vc_vvw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
23 __riscv_sf_vc_vvw_se_u8mf8(p27_26, vd, vs2, vs1, vl);
26 // CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u8mf4(
27 // CHECK-RV32-NEXT: entry:
28 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i32.nxv2i16.nxv2i8.nxv2i8.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
29 // CHECK-RV32-NEXT: ret void
31 // CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u8mf4(
32 // CHECK-RV64-NEXT: entry:
33 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i64.nxv2i16.nxv2i8.nxv2i8.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
34 // CHECK-RV64-NEXT: ret void
36 void test_sf_vc_vvw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
37 __riscv_sf_vc_vvw_se_u8mf4(p27_26, vd, vs2, vs1, vl);
40 // CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u8mf2(
41 // CHECK-RV32-NEXT: entry:
42 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i32.nxv4i16.nxv4i8.nxv4i8.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
43 // CHECK-RV32-NEXT: ret void
45 // CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u8mf2(
46 // CHECK-RV64-NEXT: entry:
47 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i64.nxv4i16.nxv4i8.nxv4i8.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
48 // CHECK-RV64-NEXT: ret void
50 void test_sf_vc_vvw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
51 __riscv_sf_vc_vvw_se_u8mf2(p27_26, vd, vs2, vs1, vl);
54 // CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u8m1(
55 // CHECK-RV32-NEXT: entry:
56 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i32.nxv8i16.nxv8i8.nxv8i8.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
57 // CHECK-RV32-NEXT: ret void
59 // CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u8m1(
60 // CHECK-RV64-NEXT: entry:
61 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i64.nxv8i16.nxv8i8.nxv8i8.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
62 // CHECK-RV64-NEXT: ret void
64 void test_sf_vc_vvw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
65 __riscv_sf_vc_vvw_se_u8m1(p27_26, vd, vs2, vs1, vl);
68 // CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u8m2(
69 // CHECK-RV32-NEXT: entry:
70 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i32.nxv16i16.nxv16i8.nxv16i8.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
71 // CHECK-RV32-NEXT: ret void
73 // CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u8m2(
74 // CHECK-RV64-NEXT: entry:
75 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i64.nxv16i16.nxv16i8.nxv16i8.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
76 // CHECK-RV64-NEXT: ret void
78 void test_sf_vc_vvw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
79 __riscv_sf_vc_vvw_se_u8m2(p27_26, vd, vs2, vs1, vl);
82 // CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u8m4(
83 // CHECK-RV32-NEXT: entry:
84 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i32.nxv32i16.nxv32i8.nxv32i8.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
85 // CHECK-RV32-NEXT: ret void
87 // CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u8m4(
88 // CHECK-RV64-NEXT: entry:
89 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i64.nxv32i16.nxv32i8.nxv32i8.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
90 // CHECK-RV64-NEXT: ret void
92 void test_sf_vc_vvw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
93 __riscv_sf_vc_vvw_se_u8m4(p27_26, vd, vs2, vs1, vl);
96 // CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u16mf4(
97 // CHECK-RV32-NEXT: entry:
98 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i32.nxv1i32.nxv1i16.nxv1i16.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
99 // CHECK-RV32-NEXT: ret void
101 // CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u16mf4(
102 // CHECK-RV64-NEXT: entry:
103 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i64.nxv1i32.nxv1i16.nxv1i16.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
104 // CHECK-RV64-NEXT: ret void
106 void test_sf_vc_vvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
107 __riscv_sf_vc_vvw_se_u16mf4(p27_26, vd, vs2, vs1, vl);
110 // CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u16mf2(
111 // CHECK-RV32-NEXT: entry:
112 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i32.nxv2i32.nxv2i16.nxv2i16.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
113 // CHECK-RV32-NEXT: ret void
115 // CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u16mf2(
116 // CHECK-RV64-NEXT: entry:
117 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i64.nxv2i32.nxv2i16.nxv2i16.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
118 // CHECK-RV64-NEXT: ret void
120 void test_sf_vc_vvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
121 __riscv_sf_vc_vvw_se_u16mf2(p27_26, vd, vs2, vs1, vl);
124 // CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u16m1(
125 // CHECK-RV32-NEXT: entry:
126 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i32.nxv4i32.nxv4i16.nxv4i16.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
127 // CHECK-RV32-NEXT: ret void
129 // CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u16m1(
130 // CHECK-RV64-NEXT: entry:
131 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i64.nxv4i32.nxv4i16.nxv4i16.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
132 // CHECK-RV64-NEXT: ret void
134 void test_sf_vc_vvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
135 __riscv_sf_vc_vvw_se_u16m1(p27_26, vd, vs2, vs1, vl);
138 // CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u16m2(
139 // CHECK-RV32-NEXT: entry:
140 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i32.nxv8i32.nxv8i16.nxv8i16.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
141 // CHECK-RV32-NEXT: ret void
143 // CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u16m2(
144 // CHECK-RV64-NEXT: entry:
145 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i64.nxv8i32.nxv8i16.nxv8i16.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
146 // CHECK-RV64-NEXT: ret void
148 void test_sf_vc_vvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
149 __riscv_sf_vc_vvw_se_u16m2(p27_26, vd, vs2, vs1, vl);
152 // CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u16m4(
153 // CHECK-RV32-NEXT: entry:
154 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i32.nxv16i32.nxv16i16.nxv16i16.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
155 // CHECK-RV32-NEXT: ret void
157 // CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u16m4(
158 // CHECK-RV64-NEXT: entry:
159 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i64.nxv16i32.nxv16i16.nxv16i16.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
160 // CHECK-RV64-NEXT: ret void
162 void test_sf_vc_vvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
163 __riscv_sf_vc_vvw_se_u16m4(p27_26, vd, vs2, vs1, vl);
166 // CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u32mf2(
167 // CHECK-RV32-NEXT: entry:
168 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i32.nxv1i64.nxv1i32.nxv1i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
169 // CHECK-RV32-NEXT: ret void
171 // CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u32mf2(
172 // CHECK-RV64-NEXT: entry:
173 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i64.nxv1i64.nxv1i32.nxv1i32.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
174 // CHECK-RV64-NEXT: ret void
176 void test_sf_vc_vvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
177 __riscv_sf_vc_vvw_se_u32mf2(p27_26, vd, vs2, vs1, vl);
180 // CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u32m1(
181 // CHECK-RV32-NEXT: entry:
182 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i32.nxv2i64.nxv2i32.nxv2i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
183 // CHECK-RV32-NEXT: ret void
185 // CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u32m1(
186 // CHECK-RV64-NEXT: entry:
187 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i64.nxv2i64.nxv2i32.nxv2i32.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
188 // CHECK-RV64-NEXT: ret void
190 void test_sf_vc_vvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
191 __riscv_sf_vc_vvw_se_u32m1(p27_26, vd, vs2, vs1, vl);
194 // CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u32m2(
195 // CHECK-RV32-NEXT: entry:
196 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i32.nxv4i64.nxv4i32.nxv4i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
197 // CHECK-RV32-NEXT: ret void
199 // CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u32m2(
200 // CHECK-RV64-NEXT: entry:
201 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i64.nxv4i64.nxv4i32.nxv4i32.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
202 // CHECK-RV64-NEXT: ret void
204 void test_sf_vc_vvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
205 __riscv_sf_vc_vvw_se_u32m2(p27_26, vd, vs2, vs1, vl);
208 // CHECK-RV32-LABEL: @test_sf_vc_vvw_se_u32m4(
209 // CHECK-RV32-NEXT: entry:
210 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i32.nxv8i64.nxv8i32.nxv8i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
211 // CHECK-RV32-NEXT: ret void
213 // CHECK-RV64-LABEL: @test_sf_vc_vvw_se_u32m4(
214 // CHECK-RV64-NEXT: entry:
215 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.vvw.se.i64.nxv8i64.nxv8i32.nxv8i32.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
216 // CHECK-RV64-NEXT: ret void
218 void test_sf_vc_vvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
219 __riscv_sf_vc_vvw_se_u32m4(p27_26, vd, vs2, vs1, vl);
222 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8mf8(
223 // CHECK-RV32-NEXT: entry:
224 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.i32.nxv1i8.nxv1i8.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
225 // CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
227 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8mf8(
228 // CHECK-RV64-NEXT: entry:
229 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.i64.nxv1i8.nxv1i8.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
230 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
232 vuint16mf4_t test_sf_vc_v_vvw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
233 return __riscv_sf_vc_v_vvw_se_u8mf8(p27_26, vd, vs2, vs1, vl);
236 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8mf4(
237 // CHECK-RV32-NEXT: entry:
238 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.i32.nxv2i8.nxv2i8.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
239 // CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
241 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8mf4(
242 // CHECK-RV64-NEXT: entry:
243 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.i64.nxv2i8.nxv2i8.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
244 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
246 vuint16mf2_t test_sf_vc_v_vvw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
247 return __riscv_sf_vc_v_vvw_se_u8mf4(p27_26, vd, vs2, vs1, vl);
250 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8mf2(
251 // CHECK-RV32-NEXT: entry:
252 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.i32.nxv4i8.nxv4i8.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
253 // CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
255 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8mf2(
256 // CHECK-RV64-NEXT: entry:
257 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.i64.nxv4i8.nxv4i8.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
258 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
260 vuint16m1_t test_sf_vc_v_vvw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
261 return __riscv_sf_vc_v_vvw_se_u8mf2(p27_26, vd, vs2, vs1, vl);
264 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8m1(
265 // CHECK-RV32-NEXT: entry:
266 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.i32.nxv8i8.nxv8i8.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
267 // CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
269 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8m1(
270 // CHECK-RV64-NEXT: entry:
271 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.i64.nxv8i8.nxv8i8.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
272 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
274 vuint16m2_t test_sf_vc_v_vvw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
275 return __riscv_sf_vc_v_vvw_se_u8m1(p27_26, vd, vs2, vs1, vl);
278 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8m2(
279 // CHECK-RV32-NEXT: entry:
280 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.i32.nxv16i8.nxv16i8.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
281 // CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
283 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8m2(
284 // CHECK-RV64-NEXT: entry:
285 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.i64.nxv16i8.nxv16i8.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
286 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
288 vuint16m4_t test_sf_vc_v_vvw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
289 return __riscv_sf_vc_v_vvw_se_u8m2(p27_26, vd, vs2, vs1, vl);
292 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u8m4(
293 // CHECK-RV32-NEXT: entry:
294 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.i32.nxv32i8.nxv32i8.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
295 // CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
297 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u8m4(
298 // CHECK-RV64-NEXT: entry:
299 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.i64.nxv32i8.nxv32i8.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
300 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
302 vuint16m8_t test_sf_vc_v_vvw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
303 return __riscv_sf_vc_v_vvw_se_u8m4(p27_26, vd, vs2, vs1, vl);
306 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u16mf4(
307 // CHECK-RV32-NEXT: entry:
308 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.i32.nxv1i16.nxv1i16.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
309 // CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
311 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u16mf4(
312 // CHECK-RV64-NEXT: entry:
313 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.i64.nxv1i16.nxv1i16.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
314 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
316 vuint32mf2_t test_sf_vc_v_vvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
317 return __riscv_sf_vc_v_vvw_se_u16mf4(p27_26, vd, vs2, vs1, vl);
320 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u16mf2(
321 // CHECK-RV32-NEXT: entry:
322 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.i32.nxv2i16.nxv2i16.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
323 // CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
325 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u16mf2(
326 // CHECK-RV64-NEXT: entry:
327 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.i64.nxv2i16.nxv2i16.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
328 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
330 vuint32m1_t test_sf_vc_v_vvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
331 return __riscv_sf_vc_v_vvw_se_u16mf2(p27_26, vd, vs2, vs1, vl);
334 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u16m1(
335 // CHECK-RV32-NEXT: entry:
336 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.i32.nxv4i16.nxv4i16.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
337 // CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
339 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u16m1(
340 // CHECK-RV64-NEXT: entry:
341 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.i64.nxv4i16.nxv4i16.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
342 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
344 vuint32m2_t test_sf_vc_v_vvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
345 return __riscv_sf_vc_v_vvw_se_u16m1(p27_26, vd, vs2, vs1, vl);
348 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u16m2(
349 // CHECK-RV32-NEXT: entry:
350 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.i32.nxv8i16.nxv8i16.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
351 // CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
353 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u16m2(
354 // CHECK-RV64-NEXT: entry:
355 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.i64.nxv8i16.nxv8i16.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
356 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
358 vuint32m4_t test_sf_vc_v_vvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
359 return __riscv_sf_vc_v_vvw_se_u16m2(p27_26, vd, vs2, vs1, vl);
362 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u16m4(
363 // CHECK-RV32-NEXT: entry:
364 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.i32.nxv16i16.nxv16i16.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
365 // CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
367 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u16m4(
368 // CHECK-RV64-NEXT: entry:
369 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.i64.nxv16i16.nxv16i16.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
370 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
372 vuint32m8_t test_sf_vc_v_vvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
373 return __riscv_sf_vc_v_vvw_se_u16m4(p27_26, vd, vs2, vs1, vl);
376 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u32mf2(
377 // CHECK-RV32-NEXT: entry:
378 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.i32.nxv1i32.nxv1i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
379 // CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
381 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u32mf2(
382 // CHECK-RV64-NEXT: entry:
383 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.i64.nxv1i32.nxv1i32.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
384 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
386 vuint64m1_t test_sf_vc_v_vvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
387 return __riscv_sf_vc_v_vvw_se_u32mf2(p27_26, vd, vs2, vs1, vl);
390 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u32m1(
391 // CHECK-RV32-NEXT: entry:
392 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.i32.nxv2i32.nxv2i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
393 // CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
395 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u32m1(
396 // CHECK-RV64-NEXT: entry:
397 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.i64.nxv2i32.nxv2i32.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
398 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
400 vuint64m2_t test_sf_vc_v_vvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
401 return __riscv_sf_vc_v_vvw_se_u32m1(p27_26, vd, vs2, vs1, vl);
404 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u32m2(
405 // CHECK-RV32-NEXT: entry:
406 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.i32.nxv4i32.nxv4i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
407 // CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
409 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u32m2(
410 // CHECK-RV64-NEXT: entry:
411 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.i64.nxv4i32.nxv4i32.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
412 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
414 vuint64m4_t test_sf_vc_v_vvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
415 return __riscv_sf_vc_v_vvw_se_u32m2(p27_26, vd, vs2, vs1, vl);
418 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_se_u32m4(
419 // CHECK-RV32-NEXT: entry:
420 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.i32.nxv8i32.nxv8i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
421 // CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
423 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_se_u32m4(
424 // CHECK-RV64-NEXT: entry:
425 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.i64.nxv8i32.nxv8i32.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
426 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
428 vuint64m8_t test_sf_vc_v_vvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
429 return __riscv_sf_vc_v_vvw_se_u32m4(p27_26, vd, vs2, vs1, vl);
432 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8mf8(
433 // CHECK-RV32-NEXT: entry:
434 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.nxv1i16.i32.nxv1i8.nxv1i8.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
435 // CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
437 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8mf8(
438 // CHECK-RV64-NEXT: entry:
439 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.nxv1i16.i64.nxv1i8.nxv1i8.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
440 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
442 vuint16mf4_t test_sf_vc_v_vvw_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
443 return __riscv_sf_vc_v_vvw_u8mf8(p27_26, vd, vs2, vs1, vl);
446 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8mf4(
447 // CHECK-RV32-NEXT: entry:
448 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.nxv2i16.i32.nxv2i8.nxv2i8.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
449 // CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
451 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8mf4(
452 // CHECK-RV64-NEXT: entry:
453 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.nxv2i16.i64.nxv2i8.nxv2i8.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
454 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
456 vuint16mf2_t test_sf_vc_v_vvw_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
457 return __riscv_sf_vc_v_vvw_u8mf4(p27_26, vd, vs2, vs1, vl);
460 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8mf2(
461 // CHECK-RV32-NEXT: entry:
462 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.nxv4i16.i32.nxv4i8.nxv4i8.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
463 // CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
465 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8mf2(
466 // CHECK-RV64-NEXT: entry:
467 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.nxv4i16.i64.nxv4i8.nxv4i8.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
468 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
470 vuint16m1_t test_sf_vc_v_vvw_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
471 return __riscv_sf_vc_v_vvw_u8mf2(p27_26, vd, vs2, vs1, vl);
474 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8m1(
475 // CHECK-RV32-NEXT: entry:
476 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.nxv8i16.i32.nxv8i8.nxv8i8.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
477 // CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
479 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8m1(
480 // CHECK-RV64-NEXT: entry:
481 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.nxv8i16.i64.nxv8i8.nxv8i8.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
482 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
484 vuint16m2_t test_sf_vc_v_vvw_u8m1(vuint16m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
485 return __riscv_sf_vc_v_vvw_u8m1(p27_26, vd, vs2, vs1, vl);
488 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8m2(
489 // CHECK-RV32-NEXT: entry:
490 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.nxv16i16.i32.nxv16i8.nxv16i8.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
491 // CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
493 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8m2(
494 // CHECK-RV64-NEXT: entry:
495 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.nxv16i16.i64.nxv16i8.nxv16i8.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
496 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
498 vuint16m4_t test_sf_vc_v_vvw_u8m2(vuint16m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
499 return __riscv_sf_vc_v_vvw_u8m2(p27_26, vd, vs2, vs1, vl);
502 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u8m4(
503 // CHECK-RV32-NEXT: entry:
504 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.nxv32i16.i32.nxv32i8.nxv32i8.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i32 [[VL:%.*]])
505 // CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
507 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u8m4(
508 // CHECK-RV64-NEXT: entry:
509 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.nxv32i16.i64.nxv32i8.nxv32i8.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 [[VL:%.*]])
510 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
512 vuint16m8_t test_sf_vc_v_vvw_u8m4(vuint16m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
513 return __riscv_sf_vc_v_vvw_u8m4(p27_26, vd, vs2, vs1, vl);
516 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u16mf4(
517 // CHECK-RV32-NEXT: entry:
518 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.nxv1i32.i32.nxv1i16.nxv1i16.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
519 // CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
521 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u16mf4(
522 // CHECK-RV64-NEXT: entry:
523 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.nxv1i32.i64.nxv1i16.nxv1i16.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
524 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
526 vuint32mf2_t test_sf_vc_v_vvw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, vuint16mf4_t vs1, size_t vl) {
527 return __riscv_sf_vc_v_vvw_u16mf4(p27_26, vd, vs2, vs1, vl);
530 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u16mf2(
531 // CHECK-RV32-NEXT: entry:
532 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.nxv2i32.i32.nxv2i16.nxv2i16.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
533 // CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
535 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u16mf2(
536 // CHECK-RV64-NEXT: entry:
537 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.nxv2i32.i64.nxv2i16.nxv2i16.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
538 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
540 vuint32m1_t test_sf_vc_v_vvw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
541 return __riscv_sf_vc_v_vvw_u16mf2(p27_26, vd, vs2, vs1, vl);
544 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u16m1(
545 // CHECK-RV32-NEXT: entry:
546 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.nxv4i32.i32.nxv4i16.nxv4i16.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
547 // CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
549 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u16m1(
550 // CHECK-RV64-NEXT: entry:
551 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.nxv4i32.i64.nxv4i16.nxv4i16.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
552 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
554 vuint32m2_t test_sf_vc_v_vvw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
555 return __riscv_sf_vc_v_vvw_u16m1(p27_26, vd, vs2, vs1, vl);
558 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u16m2(
559 // CHECK-RV32-NEXT: entry:
560 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.nxv8i32.i32.nxv8i16.nxv8i16.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
561 // CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
563 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u16m2(
564 // CHECK-RV64-NEXT: entry:
565 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.nxv8i32.i64.nxv8i16.nxv8i16.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
566 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
568 vuint32m4_t test_sf_vc_v_vvw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
569 return __riscv_sf_vc_v_vvw_u16m2(p27_26, vd, vs2, vs1, vl);
572 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u16m4(
573 // CHECK-RV32-NEXT: entry:
574 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.nxv16i32.i32.nxv16i16.nxv16i16.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i32 [[VL:%.*]])
575 // CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
577 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u16m4(
578 // CHECK-RV64-NEXT: entry:
579 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.nxv16i32.i64.nxv16i16.nxv16i16.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 [[VL:%.*]])
580 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
582 vuint32m8_t test_sf_vc_v_vvw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
583 return __riscv_sf_vc_v_vvw_u16m4(p27_26, vd, vs2, vs1, vl);
586 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u32mf2(
587 // CHECK-RV32-NEXT: entry:
588 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.nxv1i64.i32.nxv1i32.nxv1i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
589 // CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
591 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u32mf2(
592 // CHECK-RV64-NEXT: entry:
593 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.nxv1i64.i64.nxv1i32.nxv1i32.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
594 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
596 vuint64m1_t test_sf_vc_v_vvw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
597 return __riscv_sf_vc_v_vvw_u32mf2(p27_26, vd, vs2, vs1, vl);
600 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u32m1(
601 // CHECK-RV32-NEXT: entry:
602 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.nxv2i64.i32.nxv2i32.nxv2i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
603 // CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
605 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u32m1(
606 // CHECK-RV64-NEXT: entry:
607 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.nxv2i64.i64.nxv2i32.nxv2i32.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
608 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
610 vuint64m2_t test_sf_vc_v_vvw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
611 return __riscv_sf_vc_v_vvw_u32m1(p27_26, vd, vs2, vs1, vl);
614 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u32m2(
615 // CHECK-RV32-NEXT: entry:
616 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.nxv4i64.i32.nxv4i32.nxv4i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
617 // CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
619 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u32m2(
620 // CHECK-RV64-NEXT: entry:
621 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.nxv4i64.i64.nxv4i32.nxv4i32.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
622 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
624 vuint64m4_t test_sf_vc_v_vvw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
625 return __riscv_sf_vc_v_vvw_u32m2(p27_26, vd, vs2, vs1, vl);
628 // CHECK-RV32-LABEL: @test_sf_vc_v_vvw_u32m4(
629 // CHECK-RV32-NEXT: entry:
630 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.nxv8i64.i32.nxv8i32.nxv8i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i32 [[VL:%.*]])
631 // CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
633 // CHECK-RV64-LABEL: @test_sf_vc_v_vvw_u32m4(
634 // CHECK-RV64-NEXT: entry:
635 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.nxv8i64.i64.nxv8i32.nxv8i32.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 [[VL:%.*]])
636 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
638 vuint64m8_t test_sf_vc_v_vvw_u32m4(vuint64m8_t vd, vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
639 return __riscv_sf_vc_v_vvw_u32m4(p27_26, vd, vs2, vs1, vl);
642 // CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u8mf8(
643 // CHECK-RV32-NEXT: entry:
644 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i32.nxv1i16.nxv1i8.i8.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
645 // CHECK-RV32-NEXT: ret void
647 // CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u8mf8(
648 // CHECK-RV64-NEXT: entry:
649 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i64.nxv1i16.nxv1i8.i8.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
650 // CHECK-RV64-NEXT: ret void
652 void test_sf_vc_xvw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
653 __riscv_sf_vc_xvw_se_u8mf8(p27_26, vd, vs2, rs1, vl);
656 // CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u8mf4(
657 // CHECK-RV32-NEXT: entry:
658 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i32.nxv2i16.nxv2i8.i8.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
659 // CHECK-RV32-NEXT: ret void
661 // CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u8mf4(
662 // CHECK-RV64-NEXT: entry:
663 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i64.nxv2i16.nxv2i8.i8.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
664 // CHECK-RV64-NEXT: ret void
666 void test_sf_vc_xvw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
667 __riscv_sf_vc_xvw_se_u8mf4(p27_26, vd, vs2, rs1, vl);
670 // CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u8mf2(
671 // CHECK-RV32-NEXT: entry:
672 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i32.nxv4i16.nxv4i8.i8.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
673 // CHECK-RV32-NEXT: ret void
675 // CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u8mf2(
676 // CHECK-RV64-NEXT: entry:
677 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i64.nxv4i16.nxv4i8.i8.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
678 // CHECK-RV64-NEXT: ret void
680 void test_sf_vc_xvw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
681 __riscv_sf_vc_xvw_se_u8mf2(p27_26, vd, vs2, rs1, vl);
684 // CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u8m1(
685 // CHECK-RV32-NEXT: entry:
686 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i32.nxv8i16.nxv8i8.i8.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
687 // CHECK-RV32-NEXT: ret void
689 // CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u8m1(
690 // CHECK-RV64-NEXT: entry:
691 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i64.nxv8i16.nxv8i8.i8.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
692 // CHECK-RV64-NEXT: ret void
694 void test_sf_vc_xvw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
695 __riscv_sf_vc_xvw_se_u8m1(p27_26, vd, vs2, rs1, vl);
698 // CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u8m2(
699 // CHECK-RV32-NEXT: entry:
700 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i32.nxv16i16.nxv16i8.i8.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
701 // CHECK-RV32-NEXT: ret void
703 // CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u8m2(
704 // CHECK-RV64-NEXT: entry:
705 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i64.nxv16i16.nxv16i8.i8.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
706 // CHECK-RV64-NEXT: ret void
708 void test_sf_vc_xvw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
709 __riscv_sf_vc_xvw_se_u8m2(p27_26, vd, vs2, rs1, vl);
712 // CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u8m4(
713 // CHECK-RV32-NEXT: entry:
714 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i32.nxv32i16.nxv32i8.i8.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
715 // CHECK-RV32-NEXT: ret void
717 // CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u8m4(
718 // CHECK-RV64-NEXT: entry:
719 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i64.nxv32i16.nxv32i8.i8.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
720 // CHECK-RV64-NEXT: ret void
722 void test_sf_vc_xvw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
723 __riscv_sf_vc_xvw_se_u8m4(p27_26, vd, vs2, rs1, vl);
726 // CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u16mf4(
727 // CHECK-RV32-NEXT: entry:
728 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i32.nxv1i32.nxv1i16.i16.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
729 // CHECK-RV32-NEXT: ret void
731 // CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u16mf4(
732 // CHECK-RV64-NEXT: entry:
733 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i64.nxv1i32.nxv1i16.i16.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
734 // CHECK-RV64-NEXT: ret void
736 void test_sf_vc_xvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
737 __riscv_sf_vc_xvw_se_u16mf4(p27_26, vd, vs2, rs1, vl);
740 // CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u16mf2(
741 // CHECK-RV32-NEXT: entry:
742 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i32.nxv2i32.nxv2i16.i16.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
743 // CHECK-RV32-NEXT: ret void
745 // CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u16mf2(
746 // CHECK-RV64-NEXT: entry:
747 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i64.nxv2i32.nxv2i16.i16.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
748 // CHECK-RV64-NEXT: ret void
750 void test_sf_vc_xvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
751 __riscv_sf_vc_xvw_se_u16mf2(p27_26, vd, vs2, rs1, vl);
754 // CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u16m1(
755 // CHECK-RV32-NEXT: entry:
756 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i32.nxv4i32.nxv4i16.i16.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
757 // CHECK-RV32-NEXT: ret void
759 // CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u16m1(
760 // CHECK-RV64-NEXT: entry:
761 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i64.nxv4i32.nxv4i16.i16.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
762 // CHECK-RV64-NEXT: ret void
764 void test_sf_vc_xvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
765 __riscv_sf_vc_xvw_se_u16m1(p27_26, vd, vs2, rs1, vl);
768 // CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u16m2(
769 // CHECK-RV32-NEXT: entry:
770 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i32.nxv8i32.nxv8i16.i16.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
771 // CHECK-RV32-NEXT: ret void
773 // CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u16m2(
774 // CHECK-RV64-NEXT: entry:
775 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i64.nxv8i32.nxv8i16.i16.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
776 // CHECK-RV64-NEXT: ret void
778 void test_sf_vc_xvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
779 __riscv_sf_vc_xvw_se_u16m2(p27_26, vd, vs2, rs1, vl);
782 // CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u16m4(
783 // CHECK-RV32-NEXT: entry:
784 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i32.nxv16i32.nxv16i16.i16.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
785 // CHECK-RV32-NEXT: ret void
787 // CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u16m4(
788 // CHECK-RV64-NEXT: entry:
789 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i64.nxv16i32.nxv16i16.i16.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
790 // CHECK-RV64-NEXT: ret void
792 void test_sf_vc_xvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
793 __riscv_sf_vc_xvw_se_u16m4(p27_26, vd, vs2, rs1, vl);
796 // CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u32mf2(
797 // CHECK-RV32-NEXT: entry:
798 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i32.nxv1i64.nxv1i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
799 // CHECK-RV32-NEXT: ret void
801 // CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u32mf2(
802 // CHECK-RV64-NEXT: entry:
803 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i64.nxv1i64.nxv1i32.i32.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
804 // CHECK-RV64-NEXT: ret void
806 void test_sf_vc_xvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
807 __riscv_sf_vc_xvw_se_u32mf2(p27_26, vd, vs2, rs1, vl);
810 // CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u32m1(
811 // CHECK-RV32-NEXT: entry:
812 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i32.nxv2i64.nxv2i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
813 // CHECK-RV32-NEXT: ret void
815 // CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u32m1(
816 // CHECK-RV64-NEXT: entry:
817 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i64.nxv2i64.nxv2i32.i32.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
818 // CHECK-RV64-NEXT: ret void
820 void test_sf_vc_xvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
821 __riscv_sf_vc_xvw_se_u32m1(p27_26, vd, vs2, rs1, vl);
824 // CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u32m2(
825 // CHECK-RV32-NEXT: entry:
826 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i32.nxv4i64.nxv4i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
827 // CHECK-RV32-NEXT: ret void
829 // CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u32m2(
830 // CHECK-RV64-NEXT: entry:
831 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i64.nxv4i64.nxv4i32.i32.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
832 // CHECK-RV64-NEXT: ret void
834 void test_sf_vc_xvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
835 __riscv_sf_vc_xvw_se_u32m2(p27_26, vd, vs2, rs1, vl);
838 // CHECK-RV32-LABEL: @test_sf_vc_xvw_se_u32m4(
839 // CHECK-RV32-NEXT: entry:
840 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i32.nxv8i64.nxv8i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
841 // CHECK-RV32-NEXT: ret void
843 // CHECK-RV64-LABEL: @test_sf_vc_xvw_se_u32m4(
844 // CHECK-RV64-NEXT: entry:
845 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.xvw.se.i64.nxv8i64.nxv8i32.i32.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
846 // CHECK-RV64-NEXT: ret void
848 void test_sf_vc_xvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
849 __riscv_sf_vc_xvw_se_u32m4(p27_26, vd, vs2, rs1, vl);
852 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8mf8(
853 // CHECK-RV32-NEXT: entry:
854 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.i32.nxv1i8.i8.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
855 // CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
857 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8mf8(
858 // CHECK-RV64-NEXT: entry:
859 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.i64.nxv1i8.i8.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
860 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
862 vuint16mf4_t test_sf_vc_v_xvw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
863 return __riscv_sf_vc_v_xvw_se_u8mf8(p27_26, vd, vs2, rs1, vl);
866 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8mf4(
867 // CHECK-RV32-NEXT: entry:
868 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.i32.nxv2i8.i8.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
869 // CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
871 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8mf4(
872 // CHECK-RV64-NEXT: entry:
873 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.i64.nxv2i8.i8.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
874 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
876 vuint16mf2_t test_sf_vc_v_xvw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
877 return __riscv_sf_vc_v_xvw_se_u8mf4(p27_26, vd, vs2, rs1, vl);
880 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8mf2(
881 // CHECK-RV32-NEXT: entry:
882 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.i32.nxv4i8.i8.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
883 // CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
885 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8mf2(
886 // CHECK-RV64-NEXT: entry:
887 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.i64.nxv4i8.i8.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
888 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
890 vuint16m1_t test_sf_vc_v_xvw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
891 return __riscv_sf_vc_v_xvw_se_u8mf2(p27_26, vd, vs2, rs1, vl);
894 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8m1(
895 // CHECK-RV32-NEXT: entry:
896 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.i32.nxv8i8.i8.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
897 // CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
899 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8m1(
900 // CHECK-RV64-NEXT: entry:
901 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.i64.nxv8i8.i8.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
902 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
904 vuint16m2_t test_sf_vc_v_xvw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
905 return __riscv_sf_vc_v_xvw_se_u8m1(p27_26, vd, vs2, rs1, vl);
908 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8m2(
909 // CHECK-RV32-NEXT: entry:
910 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.i32.nxv16i8.i8.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
911 // CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
913 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8m2(
914 // CHECK-RV64-NEXT: entry:
915 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.i64.nxv16i8.i8.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
916 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
918 vuint16m4_t test_sf_vc_v_xvw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
919 return __riscv_sf_vc_v_xvw_se_u8m2(p27_26, vd, vs2, rs1, vl);
922 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u8m4(
923 // CHECK-RV32-NEXT: entry:
924 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.i32.nxv32i8.i8.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
925 // CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
927 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u8m4(
928 // CHECK-RV64-NEXT: entry:
929 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.i64.nxv32i8.i8.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
930 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
932 vuint16m8_t test_sf_vc_v_xvw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
933 return __riscv_sf_vc_v_xvw_se_u8m4(p27_26, vd, vs2, rs1, vl);
936 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u16mf4(
937 // CHECK-RV32-NEXT: entry:
938 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.i32.nxv1i16.i16.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
939 // CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
941 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u16mf4(
942 // CHECK-RV64-NEXT: entry:
943 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.i64.nxv1i16.i16.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
944 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
946 vuint32mf2_t test_sf_vc_v_xvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
947 return __riscv_sf_vc_v_xvw_se_u16mf4(p27_26, vd, vs2, rs1, vl);
950 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u16mf2(
951 // CHECK-RV32-NEXT: entry:
952 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.i32.nxv2i16.i16.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
953 // CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
955 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u16mf2(
956 // CHECK-RV64-NEXT: entry:
957 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.i64.nxv2i16.i16.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
958 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
960 vuint32m1_t test_sf_vc_v_xvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
961 return __riscv_sf_vc_v_xvw_se_u16mf2(p27_26, vd, vs2, rs1, vl);
964 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u16m1(
965 // CHECK-RV32-NEXT: entry:
966 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.i32.nxv4i16.i16.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
967 // CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
969 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u16m1(
970 // CHECK-RV64-NEXT: entry:
971 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.i64.nxv4i16.i16.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
972 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
974 vuint32m2_t test_sf_vc_v_xvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
975 return __riscv_sf_vc_v_xvw_se_u16m1(p27_26, vd, vs2, rs1, vl);
978 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u16m2(
979 // CHECK-RV32-NEXT: entry:
980 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.i32.nxv8i16.i16.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
981 // CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
983 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u16m2(
984 // CHECK-RV64-NEXT: entry:
985 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.i64.nxv8i16.i16.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
986 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
988 vuint32m4_t test_sf_vc_v_xvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
989 return __riscv_sf_vc_v_xvw_se_u16m2(p27_26, vd, vs2, rs1, vl);
992 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u16m4(
993 // CHECK-RV32-NEXT: entry:
994 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.i32.nxv16i16.i16.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
995 // CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
997 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u16m4(
998 // CHECK-RV64-NEXT: entry:
999 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.i64.nxv16i16.i16.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
1000 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1002 vuint32m8_t test_sf_vc_v_xvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
1003 return __riscv_sf_vc_v_xvw_se_u16m4(p27_26, vd, vs2, rs1, vl);
1006 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u32mf2(
1007 // CHECK-RV32-NEXT: entry:
1008 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i32.nxv1i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
1009 // CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1011 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u32mf2(
1012 // CHECK-RV64-NEXT: entry:
1013 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i64.nxv1i32.i32.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
1014 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1016 vuint64m1_t test_sf_vc_v_xvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
1017 return __riscv_sf_vc_v_xvw_se_u32mf2(p27_26, vd, vs2, rs1, vl);
1020 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u32m1(
1021 // CHECK-RV32-NEXT: entry:
1022 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i32.nxv2i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
1023 // CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1025 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u32m1(
1026 // CHECK-RV64-NEXT: entry:
1027 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i64.nxv2i32.i32.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
1028 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1030 vuint64m2_t test_sf_vc_v_xvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
1031 return __riscv_sf_vc_v_xvw_se_u32m1(p27_26, vd, vs2, rs1, vl);
1034 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u32m2(
1035 // CHECK-RV32-NEXT: entry:
1036 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
1037 // CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1039 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u32m2(
1040 // CHECK-RV64-NEXT: entry:
1041 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i64.nxv4i32.i32.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
1042 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1044 vuint64m4_t test_sf_vc_v_xvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
1045 return __riscv_sf_vc_v_xvw_se_u32m2(p27_26, vd, vs2, rs1, vl);
1048 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_se_u32m4(
1049 // CHECK-RV32-NEXT: entry:
1050 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i32.nxv8i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
1051 // CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1053 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_se_u32m4(
1054 // CHECK-RV64-NEXT: entry:
1055 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i64.nxv8i32.i32.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
1056 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1058 vuint64m8_t test_sf_vc_v_xvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
1059 return __riscv_sf_vc_v_xvw_se_u32m4(p27_26, vd, vs2, rs1, vl);
1062 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8mf8(
1063 // CHECK-RV32-NEXT: entry:
1064 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.nxv1i16.i32.nxv1i8.i8.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
1065 // CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1067 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8mf8(
1068 // CHECK-RV64-NEXT: entry:
1069 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.nxv1i16.i64.nxv1i8.i8.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
1070 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1072 vuint16mf4_t test_sf_vc_v_xvw_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, uint8_t rs1, size_t vl) {
1073 return __riscv_sf_vc_v_xvw_u8mf8(p27_26, vd, vs2, rs1, vl);
1076 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8mf4(
1077 // CHECK-RV32-NEXT: entry:
1078 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.nxv2i16.i32.nxv2i8.i8.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
1079 // CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1081 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8mf4(
1082 // CHECK-RV64-NEXT: entry:
1083 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.nxv2i16.i64.nxv2i8.i8.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
1084 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1086 vuint16mf2_t test_sf_vc_v_xvw_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, uint8_t rs1, size_t vl) {
1087 return __riscv_sf_vc_v_xvw_u8mf4(p27_26, vd, vs2, rs1, vl);
1090 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8mf2(
1091 // CHECK-RV32-NEXT: entry:
1092 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.nxv4i16.i32.nxv4i8.i8.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
1093 // CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1095 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8mf2(
1096 // CHECK-RV64-NEXT: entry:
1097 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.nxv4i16.i64.nxv4i8.i8.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
1098 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1100 vuint16m1_t test_sf_vc_v_xvw_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, uint8_t rs1, size_t vl) {
1101 return __riscv_sf_vc_v_xvw_u8mf2(p27_26, vd, vs2, rs1, vl);
1104 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8m1(
1105 // CHECK-RV32-NEXT: entry:
1106 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.nxv8i16.i32.nxv8i8.i8.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
1107 // CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1109 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8m1(
1110 // CHECK-RV64-NEXT: entry:
1111 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.nxv8i16.i64.nxv8i8.i8.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
1112 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1114 vuint16m2_t test_sf_vc_v_xvw_u8m1(vuint16m2_t vd, vuint8m1_t vs2, uint8_t rs1, size_t vl) {
1115 return __riscv_sf_vc_v_xvw_u8m1(p27_26, vd, vs2, rs1, vl);
1118 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8m2(
1119 // CHECK-RV32-NEXT: entry:
1120 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.nxv16i16.i32.nxv16i8.i8.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
1121 // CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1123 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8m2(
1124 // CHECK-RV64-NEXT: entry:
1125 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.nxv16i16.i64.nxv16i8.i8.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
1126 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1128 vuint16m4_t test_sf_vc_v_xvw_u8m2(vuint16m4_t vd, vuint8m2_t vs2, uint8_t rs1, size_t vl) {
1129 return __riscv_sf_vc_v_xvw_u8m2(p27_26, vd, vs2, rs1, vl);
1132 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u8m4(
1133 // CHECK-RV32-NEXT: entry:
1134 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.nxv32i16.i32.nxv32i8.i8.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i32 [[VL:%.*]])
1135 // CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1137 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u8m4(
1138 // CHECK-RV64-NEXT: entry:
1139 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.nxv32i16.i64.nxv32i8.i8.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i8 [[RS1:%.*]], i64 [[VL:%.*]])
1140 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1142 vuint16m8_t test_sf_vc_v_xvw_u8m4(vuint16m8_t vd, vuint8m4_t vs2, uint8_t rs1, size_t vl) {
1143 return __riscv_sf_vc_v_xvw_u8m4(p27_26, vd, vs2, rs1, vl);
1146 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u16mf4(
1147 // CHECK-RV32-NEXT: entry:
1148 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.nxv1i32.i32.nxv1i16.i16.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
1149 // CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1151 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u16mf4(
1152 // CHECK-RV64-NEXT: entry:
1153 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.nxv1i32.i64.nxv1i16.i16.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
1154 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1156 vuint32mf2_t test_sf_vc_v_xvw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, uint16_t rs1, size_t vl) {
1157 return __riscv_sf_vc_v_xvw_u16mf4(p27_26, vd, vs2, rs1, vl);
1160 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u16mf2(
1161 // CHECK-RV32-NEXT: entry:
1162 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.nxv2i32.i32.nxv2i16.i16.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
1163 // CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1165 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u16mf2(
1166 // CHECK-RV64-NEXT: entry:
1167 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.nxv2i32.i64.nxv2i16.i16.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
1168 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1170 vuint32m1_t test_sf_vc_v_xvw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, uint16_t rs1, size_t vl) {
1171 return __riscv_sf_vc_v_xvw_u16mf2(p27_26, vd, vs2, rs1, vl);
1174 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u16m1(
1175 // CHECK-RV32-NEXT: entry:
1176 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.nxv4i32.i32.nxv4i16.i16.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
1177 // CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1179 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u16m1(
1180 // CHECK-RV64-NEXT: entry:
1181 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.nxv4i32.i64.nxv4i16.i16.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
1182 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1184 vuint32m2_t test_sf_vc_v_xvw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, uint16_t rs1, size_t vl) {
1185 return __riscv_sf_vc_v_xvw_u16m1(p27_26, vd, vs2, rs1, vl);
1188 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u16m2(
1189 // CHECK-RV32-NEXT: entry:
1190 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.nxv8i32.i32.nxv8i16.i16.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
1191 // CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1193 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u16m2(
1194 // CHECK-RV64-NEXT: entry:
1195 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.nxv8i32.i64.nxv8i16.i16.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
1196 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1198 vuint32m4_t test_sf_vc_v_xvw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, uint16_t rs1, size_t vl) {
1199 return __riscv_sf_vc_v_xvw_u16m2(p27_26, vd, vs2, rs1, vl);
1202 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u16m4(
1203 // CHECK-RV32-NEXT: entry:
1204 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.nxv16i32.i32.nxv16i16.i16.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i32 [[VL:%.*]])
1205 // CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1207 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u16m4(
1208 // CHECK-RV64-NEXT: entry:
1209 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.nxv16i32.i64.nxv16i16.i16.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i16 [[RS1:%.*]], i64 [[VL:%.*]])
1210 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1212 vuint32m8_t test_sf_vc_v_xvw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, uint16_t rs1, size_t vl) {
1213 return __riscv_sf_vc_v_xvw_u16m4(p27_26, vd, vs2, rs1, vl);
1216 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u32mf2(
1217 // CHECK-RV32-NEXT: entry:
1218 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.nxv1i64.i32.nxv1i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
1219 // CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1221 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u32mf2(
1222 // CHECK-RV64-NEXT: entry:
1223 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.nxv1i64.i64.nxv1i32.i32.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
1224 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1226 vuint64m1_t test_sf_vc_v_xvw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, uint32_t rs1, size_t vl) {
1227 return __riscv_sf_vc_v_xvw_u32mf2(p27_26, vd, vs2, rs1, vl);
1230 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u32m1(
1231 // CHECK-RV32-NEXT: entry:
1232 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.nxv2i64.i32.nxv2i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
1233 // CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1235 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u32m1(
1236 // CHECK-RV64-NEXT: entry:
1237 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.nxv2i64.i64.nxv2i32.i32.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
1238 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1240 vuint64m2_t test_sf_vc_v_xvw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, uint32_t rs1, size_t vl) {
1241 return __riscv_sf_vc_v_xvw_u32m1(p27_26, vd, vs2, rs1, vl);
1244 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u32m2(
1245 // CHECK-RV32-NEXT: entry:
1246 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.nxv4i64.i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
1247 // CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1249 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u32m2(
1250 // CHECK-RV64-NEXT: entry:
1251 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.nxv4i64.i64.nxv4i32.i32.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
1252 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1254 vuint64m4_t test_sf_vc_v_xvw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, uint32_t rs1, size_t vl) {
1255 return __riscv_sf_vc_v_xvw_u32m2(p27_26, vd, vs2, rs1, vl);
1258 // CHECK-RV32-LABEL: @test_sf_vc_v_xvw_u32m4(
1259 // CHECK-RV32-NEXT: entry:
1260 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.nxv8i64.i32.nxv8i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i32 [[VL:%.*]])
1261 // CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1263 // CHECK-RV64-LABEL: @test_sf_vc_v_xvw_u32m4(
1264 // CHECK-RV64-NEXT: entry:
1265 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.nxv8i64.i64.nxv8i32.i32.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 [[RS1:%.*]], i64 [[VL:%.*]])
1266 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1268 vuint64m8_t test_sf_vc_v_xvw_u32m4(vuint64m8_t vd, vuint32m4_t vs2, uint32_t rs1, size_t vl) {
1269 return __riscv_sf_vc_v_xvw_u32m4(p27_26, vd, vs2, rs1, vl);
1272 // CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u8mf8(
1273 // CHECK-RV32-NEXT: entry:
1274 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i32.nxv1i16.nxv1i8.i32.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1275 // CHECK-RV32-NEXT: ret void
1277 // CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u8mf8(
1278 // CHECK-RV64-NEXT: entry:
1279 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i64.nxv1i16.nxv1i8.i64.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1280 // CHECK-RV64-NEXT: ret void
1282 void test_sf_vc_ivw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) {
1283 __riscv_sf_vc_ivw_se_u8mf8(p27_26, vd, vs2, simm5, vl);
1286 // CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u8mf4(
1287 // CHECK-RV32-NEXT: entry:
1288 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i32.nxv2i16.nxv2i8.i32.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1289 // CHECK-RV32-NEXT: ret void
1291 // CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u8mf4(
1292 // CHECK-RV64-NEXT: entry:
1293 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i64.nxv2i16.nxv2i8.i64.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1294 // CHECK-RV64-NEXT: ret void
1296 void test_sf_vc_ivw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) {
1297 __riscv_sf_vc_ivw_se_u8mf4(p27_26, vd, vs2, simm5, vl);
1300 // CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u8mf2(
1301 // CHECK-RV32-NEXT: entry:
1302 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i32.nxv4i16.nxv4i8.i32.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1303 // CHECK-RV32-NEXT: ret void
1305 // CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u8mf2(
1306 // CHECK-RV64-NEXT: entry:
1307 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i64.nxv4i16.nxv4i8.i64.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1308 // CHECK-RV64-NEXT: ret void
1310 void test_sf_vc_ivw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) {
1311 __riscv_sf_vc_ivw_se_u8mf2(p27_26, vd, vs2, simm5, vl);
1314 // CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u8m1(
1315 // CHECK-RV32-NEXT: entry:
1316 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i32.nxv8i16.nxv8i8.i32.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1317 // CHECK-RV32-NEXT: ret void
1319 // CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u8m1(
1320 // CHECK-RV64-NEXT: entry:
1321 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i64.nxv8i16.nxv8i8.i64.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1322 // CHECK-RV64-NEXT: ret void
1324 void test_sf_vc_ivw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, size_t vl) {
1325 __riscv_sf_vc_ivw_se_u8m1(p27_26, vd, vs2, simm5, vl);
1328 // CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u8m2(
1329 // CHECK-RV32-NEXT: entry:
1330 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i32.nxv16i16.nxv16i8.i32.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1331 // CHECK-RV32-NEXT: ret void
1333 // CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u8m2(
1334 // CHECK-RV64-NEXT: entry:
1335 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i64.nxv16i16.nxv16i8.i64.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1336 // CHECK-RV64-NEXT: ret void
1338 void test_sf_vc_ivw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, size_t vl) {
1339 __riscv_sf_vc_ivw_se_u8m2(p27_26, vd, vs2, simm5, vl);
1342 // CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u8m4(
1343 // CHECK-RV32-NEXT: entry:
1344 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i32.nxv32i16.nxv32i8.i32.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1345 // CHECK-RV32-NEXT: ret void
1347 // CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u8m4(
1348 // CHECK-RV64-NEXT: entry:
1349 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i64.nxv32i16.nxv32i8.i64.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1350 // CHECK-RV64-NEXT: ret void
1352 void test_sf_vc_ivw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, size_t vl) {
1353 __riscv_sf_vc_ivw_se_u8m4(p27_26, vd, vs2, simm5, vl);
1356 // CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u16mf4(
1357 // CHECK-RV32-NEXT: entry:
1358 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i32.nxv1i32.nxv1i16.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1359 // CHECK-RV32-NEXT: ret void
1361 // CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u16mf4(
1362 // CHECK-RV64-NEXT: entry:
1363 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i64.nxv1i32.nxv1i16.i64.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1364 // CHECK-RV64-NEXT: ret void
1366 void test_sf_vc_ivw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) {
1367 __riscv_sf_vc_ivw_se_u16mf4(p27_26, vd, vs2, simm5, vl);
1370 // CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u16mf2(
1371 // CHECK-RV32-NEXT: entry:
1372 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i32.nxv2i32.nxv2i16.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1373 // CHECK-RV32-NEXT: ret void
1375 // CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u16mf2(
1376 // CHECK-RV64-NEXT: entry:
1377 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i64.nxv2i32.nxv2i16.i64.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1378 // CHECK-RV64-NEXT: ret void
1380 void test_sf_vc_ivw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) {
1381 __riscv_sf_vc_ivw_se_u16mf2(p27_26, vd, vs2, simm5, vl);
1384 // CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u16m1(
1385 // CHECK-RV32-NEXT: entry:
1386 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i32.nxv4i32.nxv4i16.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1387 // CHECK-RV32-NEXT: ret void
1389 // CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u16m1(
1390 // CHECK-RV64-NEXT: entry:
1391 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i64.nxv4i32.nxv4i16.i64.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1392 // CHECK-RV64-NEXT: ret void
1394 void test_sf_vc_ivw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, size_t vl) {
1395 __riscv_sf_vc_ivw_se_u16m1(p27_26, vd, vs2, simm5, vl);
1398 // CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u16m2(
1399 // CHECK-RV32-NEXT: entry:
1400 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i32.nxv8i32.nxv8i16.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1401 // CHECK-RV32-NEXT: ret void
1403 // CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u16m2(
1404 // CHECK-RV64-NEXT: entry:
1405 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i64.nxv8i32.nxv8i16.i64.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1406 // CHECK-RV64-NEXT: ret void
1408 void test_sf_vc_ivw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, size_t vl) {
1409 __riscv_sf_vc_ivw_se_u16m2(p27_26, vd, vs2, simm5, vl);
1412 // CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u16m4(
1413 // CHECK-RV32-NEXT: entry:
1414 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i32.nxv16i32.nxv16i16.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1415 // CHECK-RV32-NEXT: ret void
1417 // CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u16m4(
1418 // CHECK-RV64-NEXT: entry:
1419 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i64.nxv16i32.nxv16i16.i64.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1420 // CHECK-RV64-NEXT: ret void
1422 void test_sf_vc_ivw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, size_t vl) {
1423 __riscv_sf_vc_ivw_se_u16m4(p27_26, vd, vs2, simm5, vl);
1426 // CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u32mf2(
1427 // CHECK-RV32-NEXT: entry:
1428 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i32.nxv1i64.nxv1i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1429 // CHECK-RV32-NEXT: ret void
1431 // CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u32mf2(
1432 // CHECK-RV64-NEXT: entry:
1433 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i64.nxv1i64.nxv1i32.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1434 // CHECK-RV64-NEXT: ret void
1436 void test_sf_vc_ivw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) {
1437 __riscv_sf_vc_ivw_se_u32mf2(p27_26, vd, vs2, simm5, vl);
1440 // CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u32m1(
1441 // CHECK-RV32-NEXT: entry:
1442 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i32.nxv2i64.nxv2i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1443 // CHECK-RV32-NEXT: ret void
1445 // CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u32m1(
1446 // CHECK-RV64-NEXT: entry:
1447 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i64.nxv2i64.nxv2i32.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1448 // CHECK-RV64-NEXT: ret void
1450 void test_sf_vc_ivw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, size_t vl) {
1451 __riscv_sf_vc_ivw_se_u32m1(p27_26, vd, vs2, simm5, vl);
1454 // CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u32m2(
1455 // CHECK-RV32-NEXT: entry:
1456 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i32.nxv4i64.nxv4i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1457 // CHECK-RV32-NEXT: ret void
1459 // CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u32m2(
1460 // CHECK-RV64-NEXT: entry:
1461 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i64.nxv4i64.nxv4i32.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1462 // CHECK-RV64-NEXT: ret void
1464 void test_sf_vc_ivw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, size_t vl) {
1465 __riscv_sf_vc_ivw_se_u32m2(p27_26, vd, vs2, simm5, vl);
1468 // CHECK-RV32-LABEL: @test_sf_vc_ivw_se_u32m4(
1469 // CHECK-RV32-NEXT: entry:
1470 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i32.nxv8i64.nxv8i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1471 // CHECK-RV32-NEXT: ret void
1473 // CHECK-RV64-LABEL: @test_sf_vc_ivw_se_u32m4(
1474 // CHECK-RV64-NEXT: entry:
1475 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.ivw.se.i64.nxv8i64.nxv8i32.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1476 // CHECK-RV64-NEXT: ret void
1478 void test_sf_vc_ivw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, size_t vl) {
1479 __riscv_sf_vc_ivw_se_u32m4(p27_26, vd, vs2, simm5, vl);
1482 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u8mf8(
1483 // CHECK-RV32-NEXT: entry:
1484 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.i32.nxv1i8.i32.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1485 // CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1487 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u8mf8(
1488 // CHECK-RV64-NEXT: entry:
1489 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.i64.nxv1i8.i64.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1490 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1492 vuint16mf4_t test_sf_vc_v_ivw_se_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) {
1493 return __riscv_sf_vc_v_ivw_se_u8mf8(p27_26, vd, vs2, simm5, vl);
1496 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u8mf4(
1497 // CHECK-RV32-NEXT: entry:
1498 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.i32.nxv2i8.i32.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1499 // CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1501 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u8mf4(
1502 // CHECK-RV64-NEXT: entry:
1503 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.i64.nxv2i8.i64.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1504 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1506 vuint16mf2_t test_sf_vc_v_ivw_se_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) {
1507 return __riscv_sf_vc_v_ivw_se_u8mf4(p27_26, vd, vs2, simm5, vl);
1510 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u8mf2(
1511 // CHECK-RV32-NEXT: entry:
1512 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.i32.nxv4i8.i32.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1513 // CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1515 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u8mf2(
1516 // CHECK-RV64-NEXT: entry:
1517 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.i64.nxv4i8.i64.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1518 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1520 vuint16m1_t test_sf_vc_v_ivw_se_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) {
1521 return __riscv_sf_vc_v_ivw_se_u8mf2(p27_26, vd, vs2, simm5, vl);
1524 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u8m1(
1525 // CHECK-RV32-NEXT: entry:
1526 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.i32.nxv8i8.i32.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1527 // CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1529 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u8m1(
1530 // CHECK-RV64-NEXT: entry:
1531 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.i64.nxv8i8.i64.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1532 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1534 vuint16m2_t test_sf_vc_v_ivw_se_u8m1(vuint16m2_t vd, vuint8m1_t vs2, size_t vl) {
1535 return __riscv_sf_vc_v_ivw_se_u8m1(p27_26, vd, vs2, simm5, vl);
1538 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u8m2(
1539 // CHECK-RV32-NEXT: entry:
1540 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.i32.nxv16i8.i32.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1541 // CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1543 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u8m2(
1544 // CHECK-RV64-NEXT: entry:
1545 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.i64.nxv16i8.i64.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1546 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1548 vuint16m4_t test_sf_vc_v_ivw_se_u8m2(vuint16m4_t vd, vuint8m2_t vs2, size_t vl) {
1549 return __riscv_sf_vc_v_ivw_se_u8m2(p27_26, vd, vs2, simm5, vl);
1552 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u8m4(
1553 // CHECK-RV32-NEXT: entry:
1554 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.i32.nxv32i8.i32.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1555 // CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1557 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u8m4(
1558 // CHECK-RV64-NEXT: entry:
1559 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.i64.nxv32i8.i64.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1560 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1562 vuint16m8_t test_sf_vc_v_ivw_se_u8m4(vuint16m8_t vd, vuint8m4_t vs2, size_t vl) {
1563 return __riscv_sf_vc_v_ivw_se_u8m4(p27_26, vd, vs2, simm5, vl);
1566 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u16mf4(
1567 // CHECK-RV32-NEXT: entry:
1568 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.i32.nxv1i16.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1569 // CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1571 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u16mf4(
1572 // CHECK-RV64-NEXT: entry:
1573 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.i64.nxv1i16.i64.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1574 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1576 vuint32mf2_t test_sf_vc_v_ivw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) {
1577 return __riscv_sf_vc_v_ivw_se_u16mf4(p27_26, vd, vs2, simm5, vl);
1580 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u16mf2(
1581 // CHECK-RV32-NEXT: entry:
1582 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.i32.nxv2i16.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1583 // CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1585 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u16mf2(
1586 // CHECK-RV64-NEXT: entry:
1587 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.i64.nxv2i16.i64.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1588 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1590 vuint32m1_t test_sf_vc_v_ivw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) {
1591 return __riscv_sf_vc_v_ivw_se_u16mf2(p27_26, vd, vs2, simm5, vl);
1594 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u16m1(
1595 // CHECK-RV32-NEXT: entry:
1596 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.i32.nxv4i16.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1597 // CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1599 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u16m1(
1600 // CHECK-RV64-NEXT: entry:
1601 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.i64.nxv4i16.i64.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1602 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1604 vuint32m2_t test_sf_vc_v_ivw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, size_t vl) {
1605 return __riscv_sf_vc_v_ivw_se_u16m1(p27_26, vd, vs2, simm5, vl);
1608 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u16m2(
1609 // CHECK-RV32-NEXT: entry:
1610 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.i32.nxv8i16.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1611 // CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1613 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u16m2(
1614 // CHECK-RV64-NEXT: entry:
1615 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.i64.nxv8i16.i64.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1616 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1618 vuint32m4_t test_sf_vc_v_ivw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, size_t vl) {
1619 return __riscv_sf_vc_v_ivw_se_u16m2(p27_26, vd, vs2, simm5, vl);
1622 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u16m4(
1623 // CHECK-RV32-NEXT: entry:
1624 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.i32.nxv16i16.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1625 // CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1627 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u16m4(
1628 // CHECK-RV64-NEXT: entry:
1629 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.i64.nxv16i16.i64.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1630 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1632 vuint32m8_t test_sf_vc_v_ivw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, size_t vl) {
1633 return __riscv_sf_vc_v_ivw_se_u16m4(p27_26, vd, vs2, simm5, vl);
1636 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u32mf2(
1637 // CHECK-RV32-NEXT: entry:
1638 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.i32.nxv1i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1639 // CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1641 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u32mf2(
1642 // CHECK-RV64-NEXT: entry:
1643 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.i64.nxv1i32.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1644 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1646 vuint64m1_t test_sf_vc_v_ivw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) {
1647 return __riscv_sf_vc_v_ivw_se_u32mf2(p27_26, vd, vs2, simm5, vl);
1650 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u32m1(
1651 // CHECK-RV32-NEXT: entry:
1652 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.i32.nxv2i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1653 // CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1655 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u32m1(
1656 // CHECK-RV64-NEXT: entry:
1657 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.i64.nxv2i32.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1658 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1660 vuint64m2_t test_sf_vc_v_ivw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, size_t vl) {
1661 return __riscv_sf_vc_v_ivw_se_u32m1(p27_26, vd, vs2, simm5, vl);
1664 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u32m2(
1665 // CHECK-RV32-NEXT: entry:
1666 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1667 // CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1669 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u32m2(
1670 // CHECK-RV64-NEXT: entry:
1671 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.i64.nxv4i32.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1672 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1674 vuint64m4_t test_sf_vc_v_ivw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, size_t vl) {
1675 return __riscv_sf_vc_v_ivw_se_u32m2(p27_26, vd, vs2, simm5, vl);
1678 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_se_u32m4(
1679 // CHECK-RV32-NEXT: entry:
1680 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.i32.nxv8i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1681 // CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1683 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_se_u32m4(
1684 // CHECK-RV64-NEXT: entry:
1685 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.i64.nxv8i32.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1686 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1688 vuint64m8_t test_sf_vc_v_ivw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, size_t vl) {
1689 return __riscv_sf_vc_v_ivw_se_u32m4(p27_26, vd, vs2, simm5, vl);
1692 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8mf8(
1693 // CHECK-RV32-NEXT: entry:
1694 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.nxv1i16.i32.nxv1i8.i32.i32(i32 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1695 // CHECK-RV32-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1697 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8mf8(
1698 // CHECK-RV64-NEXT: entry:
1699 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.nxv1i16.i64.nxv1i8.i64.i64(i64 3, <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1700 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1702 vuint16mf4_t test_sf_vc_v_ivw_u8mf8(vuint16mf4_t vd, vuint8mf8_t vs2, size_t vl) {
1703 return __riscv_sf_vc_v_ivw_u8mf8(p27_26, vd, vs2, simm5, vl);
1706 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8mf4(
1707 // CHECK-RV32-NEXT: entry:
1708 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.nxv2i16.i32.nxv2i8.i32.i32(i32 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1709 // CHECK-RV32-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1711 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8mf4(
1712 // CHECK-RV64-NEXT: entry:
1713 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.nxv2i16.i64.nxv2i8.i64.i64(i64 3, <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1714 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1716 vuint16mf2_t test_sf_vc_v_ivw_u8mf4(vuint16mf2_t vd, vuint8mf4_t vs2, size_t vl) {
1717 return __riscv_sf_vc_v_ivw_u8mf4(p27_26, vd, vs2, simm5, vl);
1720 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8mf2(
1721 // CHECK-RV32-NEXT: entry:
1722 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.nxv4i16.i32.nxv4i8.i32.i32(i32 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1723 // CHECK-RV32-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1725 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8mf2(
1726 // CHECK-RV64-NEXT: entry:
1727 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.nxv4i16.i64.nxv4i8.i64.i64(i64 3, <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1728 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1730 vuint16m1_t test_sf_vc_v_ivw_u8mf2(vuint16m1_t vd, vuint8mf2_t vs2, size_t vl) {
1731 return __riscv_sf_vc_v_ivw_u8mf2(p27_26, vd, vs2, simm5, vl);
1734 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8m1(
1735 // CHECK-RV32-NEXT: entry:
1736 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.nxv8i16.i32.nxv8i8.i32.i32(i32 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1737 // CHECK-RV32-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1739 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8m1(
1740 // CHECK-RV64-NEXT: entry:
1741 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.nxv8i16.i64.nxv8i8.i64.i64(i64 3, <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1742 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1744 vuint16m2_t test_sf_vc_v_ivw_u8m1(vuint16m2_t vd, vuint8m1_t vs2, size_t vl) {
1745 return __riscv_sf_vc_v_ivw_u8m1(p27_26, vd, vs2, simm5, vl);
1748 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8m2(
1749 // CHECK-RV32-NEXT: entry:
1750 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.nxv16i16.i32.nxv16i8.i32.i32(i32 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1751 // CHECK-RV32-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1753 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8m2(
1754 // CHECK-RV64-NEXT: entry:
1755 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.nxv16i16.i64.nxv16i8.i64.i64(i64 3, <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1756 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1758 vuint16m4_t test_sf_vc_v_ivw_u8m2(vuint16m4_t vd, vuint8m2_t vs2, size_t vl) {
1759 return __riscv_sf_vc_v_ivw_u8m2(p27_26, vd, vs2, simm5, vl);
1762 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u8m4(
1763 // CHECK-RV32-NEXT: entry:
1764 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.nxv32i16.i32.nxv32i8.i32.i32(i32 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1765 // CHECK-RV32-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1767 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u8m4(
1768 // CHECK-RV64-NEXT: entry:
1769 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.nxv32i16.i64.nxv32i8.i64.i64(i64 3, <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1770 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1772 vuint16m8_t test_sf_vc_v_ivw_u8m4(vuint16m8_t vd, vuint8m4_t vs2, size_t vl) {
1773 return __riscv_sf_vc_v_ivw_u8m4(p27_26, vd, vs2, simm5, vl);
1776 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u16mf4(
1777 // CHECK-RV32-NEXT: entry:
1778 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.nxv1i32.i32.nxv1i16.i32.i32(i32 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1779 // CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1781 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u16mf4(
1782 // CHECK-RV64-NEXT: entry:
1783 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.nxv1i32.i64.nxv1i16.i64.i64(i64 3, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1784 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1786 vuint32mf2_t test_sf_vc_v_ivw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, size_t vl) {
1787 return __riscv_sf_vc_v_ivw_u16mf4(p27_26, vd, vs2, simm5, vl);
1790 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u16mf2(
1791 // CHECK-RV32-NEXT: entry:
1792 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.nxv2i32.i32.nxv2i16.i32.i32(i32 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1793 // CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1795 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u16mf2(
1796 // CHECK-RV64-NEXT: entry:
1797 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.nxv2i32.i64.nxv2i16.i64.i64(i64 3, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1798 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1800 vuint32m1_t test_sf_vc_v_ivw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, size_t vl) {
1801 return __riscv_sf_vc_v_ivw_u16mf2(p27_26, vd, vs2, simm5, vl);
1804 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u16m1(
1805 // CHECK-RV32-NEXT: entry:
1806 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.nxv4i32.i32.nxv4i16.i32.i32(i32 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1807 // CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1809 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u16m1(
1810 // CHECK-RV64-NEXT: entry:
1811 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.nxv4i32.i64.nxv4i16.i64.i64(i64 3, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1812 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1814 vuint32m2_t test_sf_vc_v_ivw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, size_t vl) {
1815 return __riscv_sf_vc_v_ivw_u16m1(p27_26, vd, vs2, simm5, vl);
1818 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u16m2(
1819 // CHECK-RV32-NEXT: entry:
1820 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.nxv8i32.i32.nxv8i16.i32.i32(i32 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1821 // CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1823 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u16m2(
1824 // CHECK-RV64-NEXT: entry:
1825 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.nxv8i32.i64.nxv8i16.i64.i64(i64 3, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1826 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1828 vuint32m4_t test_sf_vc_v_ivw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, size_t vl) {
1829 return __riscv_sf_vc_v_ivw_u16m2(p27_26, vd, vs2, simm5, vl);
1832 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u16m4(
1833 // CHECK-RV32-NEXT: entry:
1834 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.nxv16i32.i32.nxv16i16.i32.i32(i32 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1835 // CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1837 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u16m4(
1838 // CHECK-RV64-NEXT: entry:
1839 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.nxv16i32.i64.nxv16i16.i64.i64(i64 3, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1840 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1842 vuint32m8_t test_sf_vc_v_ivw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, size_t vl) {
1843 return __riscv_sf_vc_v_ivw_u16m4(p27_26, vd, vs2, simm5, vl);
1846 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u32mf2(
1847 // CHECK-RV32-NEXT: entry:
1848 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.nxv1i64.i32.nxv1i32.i32.i32(i32 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1849 // CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1851 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u32mf2(
1852 // CHECK-RV64-NEXT: entry:
1853 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.nxv1i64.i64.nxv1i32.i64.i64(i64 3, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1854 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1856 vuint64m1_t test_sf_vc_v_ivw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, size_t vl) {
1857 return __riscv_sf_vc_v_ivw_u32mf2(p27_26, vd, vs2, simm5, vl);
1860 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u32m1(
1861 // CHECK-RV32-NEXT: entry:
1862 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.nxv2i64.i32.nxv2i32.i32.i32(i32 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1863 // CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1865 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u32m1(
1866 // CHECK-RV64-NEXT: entry:
1867 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.nxv2i64.i64.nxv2i32.i64.i64(i64 3, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1868 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1870 vuint64m2_t test_sf_vc_v_ivw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, size_t vl) {
1871 return __riscv_sf_vc_v_ivw_u32m1(p27_26, vd, vs2, simm5, vl);
1874 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u32m2(
1875 // CHECK-RV32-NEXT: entry:
1876 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.nxv4i64.i32.nxv4i32.i32.i32(i32 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1877 // CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1879 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u32m2(
1880 // CHECK-RV64-NEXT: entry:
1881 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.nxv4i64.i64.nxv4i32.i64.i64(i64 3, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1882 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1884 vuint64m4_t test_sf_vc_v_ivw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, size_t vl) {
1885 return __riscv_sf_vc_v_ivw_u32m2(p27_26, vd, vs2, simm5, vl);
1888 // CHECK-RV32-LABEL: @test_sf_vc_v_ivw_u32m4(
1889 // CHECK-RV32-NEXT: entry:
1890 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.nxv8i64.i32.nxv8i32.i32.i32(i32 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i32 10, i32 [[VL:%.*]])
1891 // CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1893 // CHECK-RV64-LABEL: @test_sf_vc_v_ivw_u32m4(
1894 // CHECK-RV64-NEXT: entry:
1895 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.nxv8i64.i64.nxv8i32.i64.i64(i64 3, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 10, i64 [[VL:%.*]])
1896 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1898 vuint64m8_t test_sf_vc_v_ivw_u32m4(vuint64m8_t vd, vuint32m4_t vs2, size_t vl) {
1899 return __riscv_sf_vc_v_ivw_u32m4(p27_26, vd, vs2, simm5, vl);
1902 // CHECK-RV32-LABEL: @test_sf_vc_fvw_se_u16mf4(
1903 // CHECK-RV32-NEXT: entry:
1904 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvw.se.i32.nxv1i32.nxv1i16.f16.i32(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
1905 // CHECK-RV32-NEXT: ret void
1907 // CHECK-RV64-LABEL: @test_sf_vc_fvw_se_u16mf4(
1908 // CHECK-RV64-NEXT: entry:
1909 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvw.se.i64.nxv1i32.nxv1i16.f16.i64(i64 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
1910 // CHECK-RV64-NEXT: ret void
1912 void test_sf_vc_fvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, _Float16 fs1, size_t vl) {
1913 __riscv_sf_vc_fvw_se_u16mf4(p26, vd, vs2, fs1, vl);
1916 // CHECK-RV32-LABEL: @test_sf_vc_fvw_se_u16mf2(
1917 // CHECK-RV32-NEXT: entry:
1918 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvw.se.i32.nxv2i32.nxv2i16.f16.i32(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
1919 // CHECK-RV32-NEXT: ret void
1921 // CHECK-RV64-LABEL: @test_sf_vc_fvw_se_u16mf2(
1922 // CHECK-RV64-NEXT: entry:
1923 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvw.se.i64.nxv2i32.nxv2i16.f16.i64(i64 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
1924 // CHECK-RV64-NEXT: ret void
1926 void test_sf_vc_fvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
1927 __riscv_sf_vc_fvw_se_u16mf2(p26, vd, vs2, fs1, vl);
1930 // CHECK-RV32-LABEL: @test_sf_vc_fvw_se_u16m1(
1931 // CHECK-RV32-NEXT: entry:
1932 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvw.se.i32.nxv4i32.nxv4i16.f16.i32(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
1933 // CHECK-RV32-NEXT: ret void
1935 // CHECK-RV64-LABEL: @test_sf_vc_fvw_se_u16m1(
1936 // CHECK-RV64-NEXT: entry:
1937 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvw.se.i64.nxv4i32.nxv4i16.f16.i64(i64 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
1938 // CHECK-RV64-NEXT: ret void
1940 void test_sf_vc_fvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, _Float16 fs1, size_t vl) {
1941 __riscv_sf_vc_fvw_se_u16m1(p26, vd, vs2, fs1, vl);
1944 // CHECK-RV32-LABEL: @test_sf_vc_fvw_se_u16m2(
1945 // CHECK-RV32-NEXT: entry:
1946 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvw.se.i32.nxv8i32.nxv8i16.f16.i32(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
1947 // CHECK-RV32-NEXT: ret void
1949 // CHECK-RV64-LABEL: @test_sf_vc_fvw_se_u16m2(
1950 // CHECK-RV64-NEXT: entry:
1951 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvw.se.i64.nxv8i32.nxv8i16.f16.i64(i64 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
1952 // CHECK-RV64-NEXT: ret void
1954 void test_sf_vc_fvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, _Float16 fs1, size_t vl) {
1955 __riscv_sf_vc_fvw_se_u16m2(p26, vd, vs2, fs1, vl);
1958 // CHECK-RV32-LABEL: @test_sf_vc_fvw_se_u16m4(
1959 // CHECK-RV32-NEXT: entry:
1960 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvw.se.i32.nxv16i32.nxv16i16.f16.i32(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
1961 // CHECK-RV32-NEXT: ret void
1963 // CHECK-RV64-LABEL: @test_sf_vc_fvw_se_u16m4(
1964 // CHECK-RV64-NEXT: entry:
1965 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvw.se.i64.nxv16i32.nxv16i16.f16.i64(i64 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
1966 // CHECK-RV64-NEXT: ret void
1968 void test_sf_vc_fvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, _Float16 fs1, size_t vl) {
1969 __riscv_sf_vc_fvw_se_u16m4(p26, vd, vs2, fs1, vl);
1972 // CHECK-RV32-LABEL: @test_sf_vc_fvw_se_u32mf2(
1973 // CHECK-RV32-NEXT: entry:
1974 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvw.se.i32.nxv1i64.nxv1i32.f32.i32(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
1975 // CHECK-RV32-NEXT: ret void
1977 // CHECK-RV64-LABEL: @test_sf_vc_fvw_se_u32mf2(
1978 // CHECK-RV64-NEXT: entry:
1979 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvw.se.i64.nxv1i64.nxv1i32.f32.i64(i64 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
1980 // CHECK-RV64-NEXT: ret void
1982 void test_sf_vc_fvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, float fs1, size_t vl) {
1983 __riscv_sf_vc_fvw_se_u32mf2(p26, vd, vs2, fs1, vl);
1986 // CHECK-RV32-LABEL: @test_sf_vc_fvw_se_u32m1(
1987 // CHECK-RV32-NEXT: entry:
1988 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvw.se.i32.nxv2i64.nxv2i32.f32.i32(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
1989 // CHECK-RV32-NEXT: ret void
1991 // CHECK-RV64-LABEL: @test_sf_vc_fvw_se_u32m1(
1992 // CHECK-RV64-NEXT: entry:
1993 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvw.se.i64.nxv2i64.nxv2i32.f32.i64(i64 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
1994 // CHECK-RV64-NEXT: ret void
1996 void test_sf_vc_fvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, float fs1, size_t vl) {
1997 __riscv_sf_vc_fvw_se_u32m1(p26, vd, vs2, fs1, vl);
2000 // CHECK-RV32-LABEL: @test_sf_vc_fvw_se_u32m2(
2001 // CHECK-RV32-NEXT: entry:
2002 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvw.se.i32.nxv4i64.nxv4i32.f32.i32(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
2003 // CHECK-RV32-NEXT: ret void
2005 // CHECK-RV64-LABEL: @test_sf_vc_fvw_se_u32m2(
2006 // CHECK-RV64-NEXT: entry:
2007 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvw.se.i64.nxv4i64.nxv4i32.f32.i64(i64 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
2008 // CHECK-RV64-NEXT: ret void
2010 void test_sf_vc_fvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, float fs1, size_t vl) {
2011 __riscv_sf_vc_fvw_se_u32m2(p26, vd, vs2, fs1, vl);
2014 // CHECK-RV32-LABEL: @test_sf_vc_fvw_se_u32m4(
2015 // CHECK-RV32-NEXT: entry:
2016 // CHECK-RV32-NEXT: call void @llvm.riscv.sf.vc.fvw.se.i32.nxv8i64.nxv8i32.f32.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
2017 // CHECK-RV32-NEXT: ret void
2019 // CHECK-RV64-LABEL: @test_sf_vc_fvw_se_u32m4(
2020 // CHECK-RV64-NEXT: entry:
2021 // CHECK-RV64-NEXT: call void @llvm.riscv.sf.vc.fvw.se.i64.nxv8i64.nxv8i32.f32.i64(i64 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
2022 // CHECK-RV64-NEXT: ret void
2024 void test_sf_vc_fvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, float fs1, size_t vl) {
2025 __riscv_sf_vc_fvw_se_u32m4(p26, vd, vs2, fs1, vl);
2028 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u16mf4(
2029 // CHECK-RV32-NEXT: entry:
2030 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.i32.nxv1i16.f16.i32(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
2031 // CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
2033 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u16mf4(
2034 // CHECK-RV64-NEXT: entry:
2035 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.i64.nxv1i16.f16.i64(i64 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
2036 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
2038 vuint32mf2_t test_sf_vc_v_fvw_se_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, _Float16 fs1, size_t vl) {
2039 return __riscv_sf_vc_v_fvw_se_u16mf4(p26, vd, vs2, fs1, vl);
2042 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u16mf2(
2043 // CHECK-RV32-NEXT: entry:
2044 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.i32.nxv2i16.f16.i32(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
2045 // CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
2047 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u16mf2(
2048 // CHECK-RV64-NEXT: entry:
2049 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.i64.nxv2i16.f16.i64(i64 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
2050 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
2052 vuint32m1_t test_sf_vc_v_fvw_se_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
2053 return __riscv_sf_vc_v_fvw_se_u16mf2(p26, vd, vs2, fs1, vl);
2056 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u16m1(
2057 // CHECK-RV32-NEXT: entry:
2058 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.i32.nxv4i16.f16.i32(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
2059 // CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
2061 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u16m1(
2062 // CHECK-RV64-NEXT: entry:
2063 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.i64.nxv4i16.f16.i64(i64 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
2064 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
2066 vuint32m2_t test_sf_vc_v_fvw_se_u16m1(vuint32m2_t vd, vuint16m1_t vs2, _Float16 fs1, size_t vl) {
2067 return __riscv_sf_vc_v_fvw_se_u16m1(p26, vd, vs2, fs1, vl);
2070 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u16m2(
2071 // CHECK-RV32-NEXT: entry:
2072 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.i32.nxv8i16.f16.i32(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
2073 // CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
2075 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u16m2(
2076 // CHECK-RV64-NEXT: entry:
2077 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.i64.nxv8i16.f16.i64(i64 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
2078 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
2080 vuint32m4_t test_sf_vc_v_fvw_se_u16m2(vuint32m4_t vd, vuint16m2_t vs2, _Float16 fs1, size_t vl) {
2081 return __riscv_sf_vc_v_fvw_se_u16m2(p26, vd, vs2, fs1, vl);
2084 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u16m4(
2085 // CHECK-RV32-NEXT: entry:
2086 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.i32.nxv16i16.f16.i32(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
2087 // CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
2089 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u16m4(
2090 // CHECK-RV64-NEXT: entry:
2091 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.i64.nxv16i16.f16.i64(i64 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
2092 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
2094 vuint32m8_t test_sf_vc_v_fvw_se_u16m4(vuint32m8_t vd, vuint16m4_t vs2, _Float16 fs1, size_t vl) {
2095 return __riscv_sf_vc_v_fvw_se_u16m4(p26, vd, vs2, fs1, vl);
2098 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u32mf2(
2099 // CHECK-RV32-NEXT: entry:
2100 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.i32.nxv1i32.f32.i32(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
2101 // CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
2103 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u32mf2(
2104 // CHECK-RV64-NEXT: entry:
2105 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.i64.nxv1i32.f32.i64(i64 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
2106 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
2108 vuint64m1_t test_sf_vc_v_fvw_se_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, float fs1, size_t vl) {
2109 return __riscv_sf_vc_v_fvw_se_u32mf2(p26, vd, vs2, fs1, vl);
2112 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u32m1(
2113 // CHECK-RV32-NEXT: entry:
2114 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.i32.nxv2i32.f32.i32(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
2115 // CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
2117 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u32m1(
2118 // CHECK-RV64-NEXT: entry:
2119 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.i64.nxv2i32.f32.i64(i64 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
2120 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
2122 vuint64m2_t test_sf_vc_v_fvw_se_u32m1(vuint64m2_t vd, vuint32m1_t vs2, float fs1, size_t vl) {
2123 return __riscv_sf_vc_v_fvw_se_u32m1(p26, vd, vs2, fs1, vl);
2126 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u32m2(
2127 // CHECK-RV32-NEXT: entry:
2128 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.i32.nxv4i32.f32.i32(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
2129 // CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
2131 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u32m2(
2132 // CHECK-RV64-NEXT: entry:
2133 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.i64.nxv4i32.f32.i64(i64 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
2134 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
2136 vuint64m4_t test_sf_vc_v_fvw_se_u32m2(vuint64m4_t vd, vuint32m2_t vs2, float fs1, size_t vl) {
2137 return __riscv_sf_vc_v_fvw_se_u32m2(p26, vd, vs2, fs1, vl);
2140 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_se_u32m4(
2141 // CHECK-RV32-NEXT: entry:
2142 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.i32.nxv8i32.f32.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
2143 // CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
2145 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_se_u32m4(
2146 // CHECK-RV64-NEXT: entry:
2147 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.i64.nxv8i32.f32.i64(i64 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
2148 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
2150 vuint64m8_t test_sf_vc_v_fvw_se_u32m4(vuint64m8_t vd, vuint32m4_t vs2, float fs1, size_t vl) {
2151 return __riscv_sf_vc_v_fvw_se_u32m4(p26, vd, vs2, fs1, vl);
2154 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u16mf4(
2155 // CHECK-RV32-NEXT: entry:
2156 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.nxv1i32.i32.nxv1i16.f16.i32(i32 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
2157 // CHECK-RV32-NEXT: ret <vscale x 1 x i32> [[TMP0]]
2159 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u16mf4(
2160 // CHECK-RV64-NEXT: entry:
2161 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.nxv1i32.i64.nxv1i16.f16.i64(i64 1, <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
2162 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
2164 vuint32mf2_t test_sf_vc_v_fvw_u16mf4(vuint32mf2_t vd, vuint16mf4_t vs2, _Float16 fs1, size_t vl) {
2165 return __riscv_sf_vc_v_fvw_u16mf4(p26, vd, vs2, fs1, vl);
2168 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u16mf2(
2169 // CHECK-RV32-NEXT: entry:
2170 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.nxv2i32.i32.nxv2i16.f16.i32(i32 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
2171 // CHECK-RV32-NEXT: ret <vscale x 2 x i32> [[TMP0]]
2173 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u16mf2(
2174 // CHECK-RV64-NEXT: entry:
2175 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.nxv2i32.i64.nxv2i16.f16.i64(i64 1, <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
2176 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
2178 vuint32m1_t test_sf_vc_v_fvw_u16mf2(vuint32m1_t vd, vuint16mf2_t vs2, _Float16 fs1, size_t vl) {
2179 return __riscv_sf_vc_v_fvw_u16mf2(p26, vd, vs2, fs1, vl);
2182 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u16m1(
2183 // CHECK-RV32-NEXT: entry:
2184 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.nxv4i32.i32.nxv4i16.f16.i32(i32 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
2185 // CHECK-RV32-NEXT: ret <vscale x 4 x i32> [[TMP0]]
2187 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u16m1(
2188 // CHECK-RV64-NEXT: entry:
2189 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.nxv4i32.i64.nxv4i16.f16.i64(i64 1, <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
2190 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
2192 vuint32m2_t test_sf_vc_v_fvw_u16m1(vuint32m2_t vd, vuint16m1_t vs2, _Float16 fs1, size_t vl) {
2193 return __riscv_sf_vc_v_fvw_u16m1(p26, vd, vs2, fs1, vl);
2196 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u16m2(
2197 // CHECK-RV32-NEXT: entry:
2198 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.nxv8i32.i32.nxv8i16.f16.i32(i32 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
2199 // CHECK-RV32-NEXT: ret <vscale x 8 x i32> [[TMP0]]
2201 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u16m2(
2202 // CHECK-RV64-NEXT: entry:
2203 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.nxv8i32.i64.nxv8i16.f16.i64(i64 1, <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
2204 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
2206 vuint32m4_t test_sf_vc_v_fvw_u16m2(vuint32m4_t vd, vuint16m2_t vs2, _Float16 fs1, size_t vl) {
2207 return __riscv_sf_vc_v_fvw_u16m2(p26, vd, vs2, fs1, vl);
2210 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u16m4(
2211 // CHECK-RV32-NEXT: entry:
2212 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.nxv16i32.i32.nxv16i16.f16.i32(i32 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i32 [[VL:%.*]])
2213 // CHECK-RV32-NEXT: ret <vscale x 16 x i32> [[TMP0]]
2215 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u16m4(
2216 // CHECK-RV64-NEXT: entry:
2217 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.nxv16i32.i64.nxv16i16.f16.i64(i64 1, <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], half [[FS1:%.*]], i64 [[VL:%.*]])
2218 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
2220 vuint32m8_t test_sf_vc_v_fvw_u16m4(vuint32m8_t vd, vuint16m4_t vs2, _Float16 fs1, size_t vl) {
2221 return __riscv_sf_vc_v_fvw_u16m4(p26, vd, vs2, fs1, vl);
2224 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u32mf2(
2225 // CHECK-RV32-NEXT: entry:
2226 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.nxv1i64.i32.nxv1i32.f32.i32(i32 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
2227 // CHECK-RV32-NEXT: ret <vscale x 1 x i64> [[TMP0]]
2229 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u32mf2(
2230 // CHECK-RV64-NEXT: entry:
2231 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.nxv1i64.i64.nxv1i32.f32.i64(i64 1, <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
2232 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
2234 vuint64m1_t test_sf_vc_v_fvw_u32mf2(vuint64m1_t vd, vuint32mf2_t vs2, float fs1, size_t vl) {
2235 return __riscv_sf_vc_v_fvw_u32mf2(p26, vd, vs2, fs1, vl);
2238 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u32m1(
2239 // CHECK-RV32-NEXT: entry:
2240 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.nxv2i64.i32.nxv2i32.f32.i32(i32 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
2241 // CHECK-RV32-NEXT: ret <vscale x 2 x i64> [[TMP0]]
2243 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u32m1(
2244 // CHECK-RV64-NEXT: entry:
2245 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.nxv2i64.i64.nxv2i32.f32.i64(i64 1, <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
2246 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
2248 vuint64m2_t test_sf_vc_v_fvw_u32m1(vuint64m2_t vd, vuint32m1_t vs2, float fs1, size_t vl) {
2249 return __riscv_sf_vc_v_fvw_u32m1(p26, vd, vs2, fs1, vl);
2252 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u32m2(
2253 // CHECK-RV32-NEXT: entry:
2254 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.nxv4i64.i32.nxv4i32.f32.i32(i32 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
2255 // CHECK-RV32-NEXT: ret <vscale x 4 x i64> [[TMP0]]
2257 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u32m2(
2258 // CHECK-RV64-NEXT: entry:
2259 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.nxv4i64.i64.nxv4i32.f32.i64(i64 1, <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
2260 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
2262 vuint64m4_t test_sf_vc_v_fvw_u32m2(vuint64m4_t vd, vuint32m2_t vs2, float fs1, size_t vl) {
2263 return __riscv_sf_vc_v_fvw_u32m2(p26, vd, vs2, fs1, vl);
2266 // CHECK-RV32-LABEL: @test_sf_vc_v_fvw_u32m4(
2267 // CHECK-RV32-NEXT: entry:
2268 // CHECK-RV32-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.i32.nxv8i32.f32.i32(i32 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i32 [[VL:%.*]])
2269 // CHECK-RV32-NEXT: ret <vscale x 8 x i64> [[TMP0]]
2271 // CHECK-RV64-LABEL: @test_sf_vc_v_fvw_u32m4(
2272 // CHECK-RV64-NEXT: entry:
2273 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.i64.nxv8i32.f32.i64(i64 1, <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], float [[FS1:%.*]], i64 [[VL:%.*]])
2274 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
2276 vuint64m8_t test_sf_vc_v_fvw_u32m4(vuint64m8_t vd, vuint32m4_t vs2, float fs1, size_t vl) {
2277 return __riscv_sf_vc_v_fvw_u32m4(p26, vd, vs2, fs1, vl);