; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+xsfvcp \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+xsfvcp \
; RUN:   -verify-machineinstrs | FileCheck %s
define void @test_sf_vc_vvv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
define void @test_sf_vc_vvv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e8mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
define void @test_sf_vc_vvv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e8mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
define void @test_sf_vc_vvv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e8m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
define void @test_sf_vc_vvv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e8m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v10, v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
define void @test_sf_vc_vvv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e8m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v12, v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
define void @test_sf_vc_vvv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e8m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v16, v24
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
define void @test_sf_vc_vvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
define void @test_sf_vc_vvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e16mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
define void @test_sf_vc_vvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e16m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
define void @test_sf_vc_vvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e16m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v10, v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
define void @test_sf_vc_vvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e16m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v12, v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
define void @test_sf_vc_vvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e16m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v16, v24
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
define void @test_sf_vc_vvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
define void @test_sf_vc_vvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
define void @test_sf_vc_vvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v10, v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
define void @test_sf_vc_vvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v12, v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
define void @test_sf_vc_vvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e32m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v16, v24
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
define void @test_sf_vc_vvv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e64m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
define void @test_sf_vc_vvv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e64m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v10, v12
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
define void @test_sf_vc_vvv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e64m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v12, v16
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
define void @test_sf_vc_vvv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e64m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    sf.vc.vvv 3, v8, v16, v24
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
define <vscale x 1 x i8> @test_sf_vc_v_vvv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
  ret <vscale x 1 x i8> %0
}

declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
define <vscale x 2 x i8> @test_sf_vc_v_vvv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
  ret <vscale x 2 x i8> %0
}

declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
define <vscale x 4 x i8> @test_sf_vc_v_vvv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
  ret <vscale x 4 x i8> %0
}

declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
define <vscale x 8 x i8> @test_sf_vc_v_vvv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
  ret <vscale x 8 x i8> %0
}

declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
define <vscale x 16 x i8> @test_sf_vc_v_vvv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
  ret <vscale x 16 x i8> %0
}

declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
define <vscale x 32 x i8> @test_sf_vc_v_vvv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
  ret <vscale x 32 x i8> %0
}

declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
define <vscale x 64 x i8> @test_sf_vc_v_vvv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8r.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
  ret <vscale x 64 x i8> %0
}

declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
define <vscale x 1 x i16> @test_sf_vc_v_vvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
  ret <vscale x 1 x i16> %0
}

declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
define <vscale x 2 x i16> @test_sf_vc_v_vvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e16mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
  ret <vscale x 2 x i16> %0
}

declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
define <vscale x 4 x i16> @test_sf_vc_v_vvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
  ret <vscale x 4 x i16> %0
}

declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
define <vscale x 8 x i16> @test_sf_vc_v_vvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
  ret <vscale x 8 x i16> %0
}

declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
define <vscale x 16 x i16> @test_sf_vc_v_vvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
  ret <vscale x 16 x i16> %0
}

declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
define <vscale x 32 x i16> @test_sf_vc_v_vvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re16.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
  ret <vscale x 32 x i16> %0
}

declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
define <vscale x 1 x i32> @test_sf_vc_v_vvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
  ret <vscale x 1 x i32> %0
}

declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
define <vscale x 2 x i32> @test_sf_vc_v_vvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
  ret <vscale x 2 x i32> %0
}

declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
define <vscale x 4 x i32> @test_sf_vc_v_vvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
  ret <vscale x 4 x i32> %0
}

declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
define <vscale x 8 x i32> @test_sf_vc_v_vvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
  ret <vscale x 8 x i32> %0
}

declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
define <vscale x 16 x i32> @test_sf_vc_v_vvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re32.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
  ret <vscale x 16 x i32> %0
}

declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
define <vscale x 1 x i64> @test_sf_vc_v_vvv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
  ret <vscale x 1 x i64> %0
}

declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
define <vscale x 2 x i64> @test_sf_vc_v_vvv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
  ret <vscale x 2 x i64> %0
}

declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
define <vscale x 4 x i64> @test_sf_vc_v_vvv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v12, v16
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
  ret <vscale x 4 x i64> %0
}

declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
define <vscale x 8 x i64> @test_sf_vc_v_vvv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v16, v24
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
  ret <vscale x 8 x i64> %0
}

declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
define <vscale x 1 x i8> @test_sf_vc_v_vvv_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
  ret <vscale x 1 x i8> %0
}

declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
define <vscale x 2 x i8> @test_sf_vc_v_vvv_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e8mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
  ret <vscale x 2 x i8> %0
}

declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
define <vscale x 4 x i8> @test_sf_vc_v_vvv_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e8mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
  ret <vscale x 4 x i8> %0
}

declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
define <vscale x 8 x i8> @test_sf_vc_v_vvv_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e8m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
  ret <vscale x 8 x i8> %0
}

declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
define <vscale x 16 x i8> @test_sf_vc_v_vvv_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e8m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    sf.vc.v.vvv 3, v8, v10, v12
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
  ret <vscale x 16 x i8> %0
}

declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
652 define <vscale x 32 x i8> @test_sf_vc_v_vvv_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
653 ; CHECK-LABEL: test_sf_vc_v_vvv_e8m4:
654 ; CHECK: # %bb.0: # %entry
655 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
656 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
659 %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
660 ret <vscale x 32 x i8> %0
663 declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
665 define <vscale x 64 x i8> @test_sf_vc_v_vvv_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
666 ; CHECK-LABEL: test_sf_vc_v_vvv_e8m8:
667 ; CHECK: # %bb.0: # %entry
668 ; CHECK-NEXT: vl8r.v v24, (a0)
669 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
670 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
673 %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
674 ret <vscale x 64 x i8> %0
677 declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
679 define <vscale x 1 x i16> @test_sf_vc_v_vvv_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
680 ; CHECK-LABEL: test_sf_vc_v_vvv_e16mf4:
681 ; CHECK: # %bb.0: # %entry
682 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
683 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
686 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
687 ret <vscale x 1 x i16> %0
690 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
692 define <vscale x 2 x i16> @test_sf_vc_v_vvv_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
693 ; CHECK-LABEL: test_sf_vc_v_vvv_e16mf2:
694 ; CHECK: # %bb.0: # %entry
695 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
696 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
699 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
700 ret <vscale x 2 x i16> %0
703 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
705 define <vscale x 4 x i16> @test_sf_vc_v_vvv_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
706 ; CHECK-LABEL: test_sf_vc_v_vvv_e16m1:
707 ; CHECK: # %bb.0: # %entry
708 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
709 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
712 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
713 ret <vscale x 4 x i16> %0
716 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
718 define <vscale x 8 x i16> @test_sf_vc_v_vvv_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
719 ; CHECK-LABEL: test_sf_vc_v_vvv_e16m2:
720 ; CHECK: # %bb.0: # %entry
721 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
722 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
725 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
726 ret <vscale x 8 x i16> %0
729 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
731 define <vscale x 16 x i16> @test_sf_vc_v_vvv_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
732 ; CHECK-LABEL: test_sf_vc_v_vvv_e16m4:
733 ; CHECK: # %bb.0: # %entry
734 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
735 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
738 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
739 ret <vscale x 16 x i16> %0
742 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
744 define <vscale x 32 x i16> @test_sf_vc_v_vvv_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
745 ; CHECK-LABEL: test_sf_vc_v_vvv_e16m8:
746 ; CHECK: # %bb.0: # %entry
747 ; CHECK-NEXT: vl8re16.v v24, (a0)
748 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
749 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
752 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
753 ret <vscale x 32 x i16> %0
756 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
758 define <vscale x 1 x i32> @test_sf_vc_v_vvv_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
759 ; CHECK-LABEL: test_sf_vc_v_vvv_e32mf2:
760 ; CHECK: # %bb.0: # %entry
761 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
762 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
765 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
766 ret <vscale x 1 x i32> %0
769 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
771 define <vscale x 2 x i32> @test_sf_vc_v_vvv_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
772 ; CHECK-LABEL: test_sf_vc_v_vvv_e32m1:
773 ; CHECK: # %bb.0: # %entry
774 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
775 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
778 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
779 ret <vscale x 2 x i32> %0
782 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
784 define <vscale x 4 x i32> @test_sf_vc_v_vvv_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
785 ; CHECK-LABEL: test_sf_vc_v_vvv_e32m2:
786 ; CHECK: # %bb.0: # %entry
787 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
788 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
791 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
792 ret <vscale x 4 x i32> %0
795 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
797 define <vscale x 8 x i32> @test_sf_vc_v_vvv_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
798 ; CHECK-LABEL: test_sf_vc_v_vvv_e32m4:
799 ; CHECK: # %bb.0: # %entry
800 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
801 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
804 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
805 ret <vscale x 8 x i32> %0
808 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
810 define <vscale x 16 x i32> @test_sf_vc_v_vvv_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
811 ; CHECK-LABEL: test_sf_vc_v_vvv_e32m8:
812 ; CHECK: # %bb.0: # %entry
813 ; CHECK-NEXT: vl8re32.v v24, (a0)
814 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
815 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
818 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
819 ret <vscale x 16 x i32> %0
822 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
824 define <vscale x 1 x i64> @test_sf_vc_v_vvv_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
825 ; CHECK-LABEL: test_sf_vc_v_vvv_e64m1:
826 ; CHECK: # %bb.0: # %entry
827 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
828 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
831 %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
832 ret <vscale x 1 x i64> %0
835 declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
837 define <vscale x 2 x i64> @test_sf_vc_v_vvv_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
838 ; CHECK-LABEL: test_sf_vc_v_vvv_e64m2:
839 ; CHECK: # %bb.0: # %entry
840 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
841 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
844 %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
845 ret <vscale x 2 x i64> %0
848 declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
850 define <vscale x 4 x i64> @test_sf_vc_v_vvv_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
851 ; CHECK-LABEL: test_sf_vc_v_vvv_e64m4:
852 ; CHECK: # %bb.0: # %entry
853 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
854 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
857 %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
858 ret <vscale x 4 x i64> %0
861 declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
863 define <vscale x 8 x i64> @test_sf_vc_v_vvv_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
864 ; CHECK-LABEL: test_sf_vc_v_vvv_e64m8:
865 ; CHECK: # %bb.0: # %entry
866 ; CHECK-NEXT: vl8re64.v v24, (a0)
867 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
868 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
871 %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
872 ret <vscale x 8 x i64> %0
875 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
877 define void @test_sf_vc_xvv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
878 ; CHECK-LABEL: test_sf_vc_xvv_se_e8mf8:
879 ; CHECK: # %bb.0: # %entry
880 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
881 ; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
884 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i8.i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
888 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i8.i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, i8, iXLen)
890 define void @test_sf_vc_xvv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
891 ; CHECK-LABEL: test_sf_vc_xvv_se_e8mf4:
892 ; CHECK: # %bb.0: # %entry
893 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
894 ; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
897 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i8.i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
901 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i8.i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, i8, iXLen)
903 define void @test_sf_vc_xvv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
904 ; CHECK-LABEL: test_sf_vc_xvv_se_e8mf2:
905 ; CHECK: # %bb.0: # %entry
906 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
907 ; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
910 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i8.i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
914 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i8.i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, i8, iXLen)
916 define void @test_sf_vc_xvv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
917 ; CHECK-LABEL: test_sf_vc_xvv_se_e8m1:
918 ; CHECK: # %bb.0: # %entry
919 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
920 ; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
923 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i8.i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
927 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i8.i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, i8, iXLen)
929 define void @test_sf_vc_xvv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
930 ; CHECK-LABEL: test_sf_vc_xvv_se_e8m2:
931 ; CHECK: # %bb.0: # %entry
932 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
933 ; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0
936 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i8.i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
940 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i8.i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, i8, iXLen)
942 define void @test_sf_vc_xvv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
943 ; CHECK-LABEL: test_sf_vc_xvv_se_e8m4:
944 ; CHECK: # %bb.0: # %entry
945 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
946 ; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0
949 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i8.i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
953 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i8.i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, i8, iXLen)
955 define void @test_sf_vc_xvv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
956 ; CHECK-LABEL: test_sf_vc_xvv_se_e8m8:
957 ; CHECK: # %bb.0: # %entry
958 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
959 ; CHECK-NEXT: sf.vc.xvv 3, v8, v16, a0
962 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv64i8.i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
966 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv64i8.i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, i8, iXLen)
968 define void @test_sf_vc_xvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
969 ; CHECK-LABEL: test_sf_vc_xvv_se_e16mf4:
970 ; CHECK: # %bb.0: # %entry
971 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
972 ; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
975 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i16.i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
979 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i16.i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, i16, iXLen)
981 define void @test_sf_vc_xvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
982 ; CHECK-LABEL: test_sf_vc_xvv_se_e16mf2:
983 ; CHECK: # %bb.0: # %entry
984 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
985 ; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
988 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i16.i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
992 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i16.i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, i16, iXLen)
994 define void @test_sf_vc_xvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
995 ; CHECK-LABEL: test_sf_vc_xvv_se_e16m1:
996 ; CHECK: # %bb.0: # %entry
997 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
998 ; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
1001 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i16.i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
1005 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i16.i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, i16, iXLen)
1007 define void @test_sf_vc_xvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1008 ; CHECK-LABEL: test_sf_vc_xvv_se_e16m2:
1009 ; CHECK: # %bb.0: # %entry
1010 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1011 ; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0
1014 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i16.i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
1018 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i16.i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, i16, iXLen)
1020 define void @test_sf_vc_xvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1021 ; CHECK-LABEL: test_sf_vc_xvv_se_e16m4:
1022 ; CHECK: # %bb.0: # %entry
1023 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
1024 ; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0
1027 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i16.i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
1031 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i16.i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, i16, iXLen)
1033 define void @test_sf_vc_xvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1034 ; CHECK-LABEL: test_sf_vc_xvv_se_e16m8:
1035 ; CHECK: # %bb.0: # %entry
1036 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
1037 ; CHECK-NEXT: sf.vc.xvv 3, v8, v16, a0
1040 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i16.i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
1044 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i16.i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, i16, iXLen)
1046 define void @test_sf_vc_xvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1047 ; CHECK-LABEL: test_sf_vc_xvv_se_e32mf2:
1048 ; CHECK: # %bb.0: # %entry
1049 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1050 ; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
1053 tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
1057 declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, i32, iXLen)
1059 define void @test_sf_vc_xvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1060 ; CHECK-LABEL: test_sf_vc_xvv_se_e32m1:
1061 ; CHECK: # %bb.0: # %entry
1062 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1063 ; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
1066 tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
1070 declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, i32, iXLen)
1072 define void @test_sf_vc_xvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1073 ; CHECK-LABEL: test_sf_vc_xvv_se_e32m2:
1074 ; CHECK: # %bb.0: # %entry
1075 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1076 ; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0
1079 tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
1083 declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, i32, iXLen)
1085 define void @test_sf_vc_xvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1086 ; CHECK-LABEL: test_sf_vc_xvv_se_e32m4:
1087 ; CHECK: # %bb.0: # %entry
1088 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1089 ; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0
1092 tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
1096 declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, i32, iXLen)
1098 define void @test_sf_vc_xvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1099 ; CHECK-LABEL: test_sf_vc_xvv_se_e32m8:
1100 ; CHECK: # %bb.0: # %entry
1101 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1102 ; CHECK-NEXT: sf.vc.xvv 3, v8, v16, a0
1105 tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i32.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
1109 declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i32.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, i32, iXLen)
1111 define <vscale x 1 x i8> @test_sf_vc_v_xvv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1112 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf8:
1113 ; CHECK: # %bb.0: # %entry
1114 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1115 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1118 %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.iXLen.i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
1119 ret <vscale x 1 x i8> %0
1122 declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.iXLen.i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, i8, iXLen)
1124 define <vscale x 2 x i8> @test_sf_vc_v_xvv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1125 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf4:
1126 ; CHECK: # %bb.0: # %entry
1127 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1128 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1131 %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.iXLen.i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
1132 ret <vscale x 2 x i8> %0
1135 declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.iXLen.i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, i8, iXLen)
1137 define <vscale x 4 x i8> @test_sf_vc_v_xvv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1138 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf2:
1139 ; CHECK: # %bb.0: # %entry
1140 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1141 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1144 %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.iXLen.i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
1145 ret <vscale x 4 x i8> %0
1148 declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.iXLen.i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, i8, iXLen)
1150 define <vscale x 8 x i8> @test_sf_vc_v_xvv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1151 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m1:
1152 ; CHECK: # %bb.0: # %entry
1153 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1154 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1157 %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.iXLen.i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
1158 ret <vscale x 8 x i8> %0
1161 declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.iXLen.i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, i8, iXLen)
1163 define <vscale x 16 x i8> @test_sf_vc_v_xvv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1164 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m2:
1165 ; CHECK: # %bb.0: # %entry
1166 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1167 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
1170 %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.iXLen.i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
1171 ret <vscale x 16 x i8> %0
1174 declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.iXLen.i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, i8, iXLen)
1176 define <vscale x 32 x i8> @test_sf_vc_v_xvv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1177 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m4:
1178 ; CHECK: # %bb.0: # %entry
1179 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
1180 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
1183 %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.iXLen.i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
1184 ret <vscale x 32 x i8> %0
1187 declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.iXLen.i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, i8, iXLen)
1189 define <vscale x 64 x i8> @test_sf_vc_v_xvv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1190 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m8:
1191 ; CHECK: # %bb.0: # %entry
1192 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
1193 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
1196 %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.iXLen.i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
1197 ret <vscale x 64 x i8> %0
1200 declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.iXLen.i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, i8, iXLen)
1202 define <vscale x 1 x i16> @test_sf_vc_v_xvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1203 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16mf4:
1204 ; CHECK: # %bb.0: # %entry
1205 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1206 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1209 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.iXLen.i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
1210 ret <vscale x 1 x i16> %0
1213 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.iXLen.i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, i16, iXLen)
1215 define <vscale x 2 x i16> @test_sf_vc_v_xvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1216 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16mf2:
1217 ; CHECK: # %bb.0: # %entry
1218 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
1219 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1222 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.iXLen.i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
1223 ret <vscale x 2 x i16> %0
1226 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.iXLen.i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, i16, iXLen)
1228 define <vscale x 4 x i16> @test_sf_vc_v_xvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1229 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m1:
1230 ; CHECK: # %bb.0: # %entry
1231 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1232 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1235 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.iXLen.i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
1236 ret <vscale x 4 x i16> %0
1239 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.iXLen.i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, i16, iXLen)
1241 define <vscale x 8 x i16> @test_sf_vc_v_xvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1242 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m2:
1243 ; CHECK: # %bb.0: # %entry
1244 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1245 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
1248 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.iXLen.i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
1249 ret <vscale x 8 x i16> %0
1252 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.iXLen.i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, i16, iXLen)
1254 define <vscale x 16 x i16> @test_sf_vc_v_xvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1255 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m4:
1256 ; CHECK: # %bb.0: # %entry
1257 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
1258 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
1261 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.iXLen.i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
1262 ret <vscale x 16 x i16> %0
; sf.vc.v.xvv.se tests (scalar rs1 operand, side-effecting form that also
; returns the destination vector): one test per remaining e16/e32 LMUL.
; Assertions are autogenerated (see header); each test checks that the
; intrinsic lowers to a vsetvli with the matching SEW/LMUL followed by a
; single sf.vc.v.xvv instruction. iXLen is rewritten to i32/i64 by the
; sed in the RUN lines, so the intrinsic mangling covers both targets.
1265 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.iXLen.i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, i16, iXLen)
1267 define <vscale x 32 x i16> @test_sf_vc_v_xvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1268 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m8:
1269 ; CHECK: # %bb.0: # %entry
1270 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
1271 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
1274 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.iXLen.i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
1275 ret <vscale x 32 x i16> %0
1278 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.iXLen.i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, i16, iXLen)
1280 define <vscale x 1 x i32> @test_sf_vc_v_xvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1281 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32mf2:
1282 ; CHECK: # %bb.0: # %entry
1283 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1284 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1287 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.iXLen.i32.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
1288 ret <vscale x 1 x i32> %0
1291 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.iXLen.i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, i32, iXLen)
1293 define <vscale x 2 x i32> @test_sf_vc_v_xvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1294 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m1:
1295 ; CHECK: # %bb.0: # %entry
1296 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1297 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1300 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.iXLen.i32.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
1301 ret <vscale x 2 x i32> %0
1304 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.iXLen.i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, i32, iXLen)
1306 define <vscale x 4 x i32> @test_sf_vc_v_xvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1307 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m2:
1308 ; CHECK: # %bb.0: # %entry
1309 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1310 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
1313 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.iXLen.i32.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
1314 ret <vscale x 4 x i32> %0
1317 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.iXLen.i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, i32, iXLen)
1319 define <vscale x 8 x i32> @test_sf_vc_v_xvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1320 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m4:
1321 ; CHECK: # %bb.0: # %entry
1322 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1323 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
1326 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.iXLen.i32.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
1327 ret <vscale x 8 x i32> %0
1330 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.iXLen.i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, i32, iXLen)
1332 define <vscale x 16 x i32> @test_sf_vc_v_xvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1333 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m8:
1334 ; CHECK: # %bb.0: # %entry
1335 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1336 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
1339 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.iXLen.i32.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
1340 ret <vscale x 16 x i32> %0
1343 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.iXLen.i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, i32, iXLen)
; sf.vc.v.xvv tests (scalar rs1 operand, non-".se" form — same lowering as
; the .se variant above, still producing a destination vector): one test per
; SEW/LMUL from e8mf8 through e32m8. Each checks for a vsetvli with the
; matching SEW/LMUL followed by a single sf.vc.v.xvv. Assertions are
; autogenerated (see header); do not hand-edit CHECK lines.
1345 define <vscale x 1 x i8> @test_sf_vc_v_xvv_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1346 ; CHECK-LABEL: test_sf_vc_v_xvv_e8mf8:
1347 ; CHECK: # %bb.0: # %entry
1348 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1349 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1352 %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.nxv1i8.iXLen.i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
1353 ret <vscale x 1 x i8> %0
1356 declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.nxv1i8.iXLen.i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, i8, iXLen)
1358 define <vscale x 2 x i8> @test_sf_vc_v_xvv_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1359 ; CHECK-LABEL: test_sf_vc_v_xvv_e8mf4:
1360 ; CHECK: # %bb.0: # %entry
1361 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1362 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1365 %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.nxv2i8.iXLen.i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
1366 ret <vscale x 2 x i8> %0
1369 declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.nxv2i8.iXLen.i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, i8, iXLen)
1371 define <vscale x 4 x i8> @test_sf_vc_v_xvv_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1372 ; CHECK-LABEL: test_sf_vc_v_xvv_e8mf2:
1373 ; CHECK: # %bb.0: # %entry
1374 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1375 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1378 %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.nxv4i8.iXLen.i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
1379 ret <vscale x 4 x i8> %0
1382 declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.nxv4i8.iXLen.i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, i8, iXLen)
1384 define <vscale x 8 x i8> @test_sf_vc_v_xvv_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1385 ; CHECK-LABEL: test_sf_vc_v_xvv_e8m1:
1386 ; CHECK: # %bb.0: # %entry
1387 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1388 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1391 %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.nxv8i8.iXLen.i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
1392 ret <vscale x 8 x i8> %0
1395 declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.nxv8i8.iXLen.i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, i8, iXLen)
1397 define <vscale x 16 x i8> @test_sf_vc_v_xvv_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1398 ; CHECK-LABEL: test_sf_vc_v_xvv_e8m2:
1399 ; CHECK: # %bb.0: # %entry
1400 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1401 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
1404 %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.nxv16i8.iXLen.i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
1405 ret <vscale x 16 x i8> %0
1408 declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.nxv16i8.iXLen.i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, i8, iXLen)
1410 define <vscale x 32 x i8> @test_sf_vc_v_xvv_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1411 ; CHECK-LABEL: test_sf_vc_v_xvv_e8m4:
1412 ; CHECK: # %bb.0: # %entry
1413 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
1414 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
1417 %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.nxv32i8.iXLen.i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
1418 ret <vscale x 32 x i8> %0
1421 declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.nxv32i8.iXLen.i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, i8, iXLen)
1423 define <vscale x 64 x i8> @test_sf_vc_v_xvv_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1424 ; CHECK-LABEL: test_sf_vc_v_xvv_e8m8:
1425 ; CHECK: # %bb.0: # %entry
1426 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
1427 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
1430 %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.nxv64i8.iXLen.i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
1431 ret <vscale x 64 x i8> %0
1434 declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.nxv64i8.iXLen.i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, i8, iXLen)
1436 define <vscale x 1 x i16> @test_sf_vc_v_xvv_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1437 ; CHECK-LABEL: test_sf_vc_v_xvv_e16mf4:
1438 ; CHECK: # %bb.0: # %entry
1439 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1440 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1443 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.nxv1i16.iXLen.i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
1444 ret <vscale x 1 x i16> %0
1447 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.nxv1i16.iXLen.i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, i16, iXLen)
1449 define <vscale x 2 x i16> @test_sf_vc_v_xvv_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1450 ; CHECK-LABEL: test_sf_vc_v_xvv_e16mf2:
1451 ; CHECK: # %bb.0: # %entry
1452 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
1453 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1456 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.nxv2i16.iXLen.i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
1457 ret <vscale x 2 x i16> %0
1460 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.nxv2i16.iXLen.i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, i16, iXLen)
1462 define <vscale x 4 x i16> @test_sf_vc_v_xvv_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1463 ; CHECK-LABEL: test_sf_vc_v_xvv_e16m1:
1464 ; CHECK: # %bb.0: # %entry
1465 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1466 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1469 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.nxv4i16.iXLen.i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
1470 ret <vscale x 4 x i16> %0
1473 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.nxv4i16.iXLen.i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, i16, iXLen)
1475 define <vscale x 8 x i16> @test_sf_vc_v_xvv_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1476 ; CHECK-LABEL: test_sf_vc_v_xvv_e16m2:
1477 ; CHECK: # %bb.0: # %entry
1478 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1479 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
1482 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.nxv8i16.iXLen.i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
1483 ret <vscale x 8 x i16> %0
1486 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.nxv8i16.iXLen.i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, i16, iXLen)
1488 define <vscale x 16 x i16> @test_sf_vc_v_xvv_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1489 ; CHECK-LABEL: test_sf_vc_v_xvv_e16m4:
1490 ; CHECK: # %bb.0: # %entry
1491 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
1492 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
1495 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.nxv16i16.iXLen.i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
1496 ret <vscale x 16 x i16> %0
1499 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.nxv16i16.iXLen.i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, i16, iXLen)
1501 define <vscale x 32 x i16> @test_sf_vc_v_xvv_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1502 ; CHECK-LABEL: test_sf_vc_v_xvv_e16m8:
1503 ; CHECK: # %bb.0: # %entry
1504 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
1505 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
1508 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.nxv32i16.iXLen.i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
1509 ret <vscale x 32 x i16> %0
1512 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.nxv32i16.iXLen.i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, i16, iXLen)
1514 define <vscale x 1 x i32> @test_sf_vc_v_xvv_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1515 ; CHECK-LABEL: test_sf_vc_v_xvv_e32mf2:
1516 ; CHECK: # %bb.0: # %entry
1517 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1518 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1521 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.nxv1i32.iXLen.i32.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
1522 ret <vscale x 1 x i32> %0
1525 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.nxv1i32.iXLen.i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, i32, iXLen)
1527 define <vscale x 2 x i32> @test_sf_vc_v_xvv_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1528 ; CHECK-LABEL: test_sf_vc_v_xvv_e32m1:
1529 ; CHECK: # %bb.0: # %entry
1530 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1531 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1534 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.nxv2i32.iXLen.i32.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
1535 ret <vscale x 2 x i32> %0
1538 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.nxv2i32.iXLen.i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, i32, iXLen)
1540 define <vscale x 4 x i32> @test_sf_vc_v_xvv_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1541 ; CHECK-LABEL: test_sf_vc_v_xvv_e32m2:
1542 ; CHECK: # %bb.0: # %entry
1543 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1544 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
1547 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.nxv4i32.iXLen.i32.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
1548 ret <vscale x 4 x i32> %0
1551 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.nxv4i32.iXLen.i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, i32, iXLen)
1553 define <vscale x 8 x i32> @test_sf_vc_v_xvv_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1554 ; CHECK-LABEL: test_sf_vc_v_xvv_e32m4:
1555 ; CHECK: # %bb.0: # %entry
1556 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1557 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
1560 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.nxv8i32.iXLen.i32.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
1561 ret <vscale x 8 x i32> %0
1564 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.nxv8i32.iXLen.i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, i32, iXLen)
1566 define <vscale x 16 x i32> @test_sf_vc_v_xvv_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1567 ; CHECK-LABEL: test_sf_vc_v_xvv_e32m8:
1568 ; CHECK: # %bb.0: # %entry
1569 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1570 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
1573 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.nxv16i32.iXLen.i32.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
1574 ret <vscale x 16 x i32> %0
1577 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.nxv16i32.iXLen.i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, i32, iXLen)
; sf.vc.ivv.se tests (5-bit immediate operand, side-effect-only form with no
; returned value): one test per SEW/LMUL from e8mf8 through e64m8. Each
; checks for a vsetvli with the matching SEW/LMUL followed by a single
; sf.vc.ivv with immediate 10. Assertions are autogenerated (see header);
; do not hand-edit CHECK lines.
1579 define void @test_sf_vc_ivv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
1580 ; CHECK-LABEL: test_sf_vc_ivv_se_e8mf8:
1581 ; CHECK: # %bb.0: # %entry
1582 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
1583 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
1586 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
1590 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, iXLen)
1592 define void @test_sf_vc_ivv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
1593 ; CHECK-LABEL: test_sf_vc_ivv_se_e8mf4:
1594 ; CHECK: # %bb.0: # %entry
1595 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
1596 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
1599 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
1603 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen, iXLen)
1605 define void @test_sf_vc_ivv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
1606 ; CHECK-LABEL: test_sf_vc_ivv_se_e8mf2:
1607 ; CHECK: # %bb.0: # %entry
1608 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
1609 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
1612 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
1616 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen, iXLen)
1618 define void @test_sf_vc_ivv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
1619 ; CHECK-LABEL: test_sf_vc_ivv_se_e8m1:
1620 ; CHECK: # %bb.0: # %entry
1621 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
1622 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
1625 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
1629 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen, iXLen)
1631 define void @test_sf_vc_ivv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
1632 ; CHECK-LABEL: test_sf_vc_ivv_se_e8m2:
1633 ; CHECK: # %bb.0: # %entry
1634 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
1635 ; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10
1638 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
1642 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen, iXLen)
1644 define void @test_sf_vc_ivv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
1645 ; CHECK-LABEL: test_sf_vc_ivv_se_e8m4:
1646 ; CHECK: # %bb.0: # %entry
1647 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
1648 ; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10
1651 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
1655 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen, iXLen)
1657 define void @test_sf_vc_ivv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen %vl) {
1658 ; CHECK-LABEL: test_sf_vc_ivv_se_e8m8:
1659 ; CHECK: # %bb.0: # %entry
1660 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
1661 ; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 10
1664 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen 10, iXLen %vl)
1668 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen)
1670 define void @test_sf_vc_ivv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
1671 ; CHECK-LABEL: test_sf_vc_ivv_se_e16mf4:
1672 ; CHECK: # %bb.0: # %entry
1673 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
1674 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
1677 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
1681 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen, iXLen)
1683 define void @test_sf_vc_ivv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
1684 ; CHECK-LABEL: test_sf_vc_ivv_se_e16mf2:
1685 ; CHECK: # %bb.0: # %entry
1686 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
1687 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
1690 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
1694 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen, iXLen)
1696 define void @test_sf_vc_ivv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
1697 ; CHECK-LABEL: test_sf_vc_ivv_se_e16m1:
1698 ; CHECK: # %bb.0: # %entry
1699 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
1700 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
1703 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
1707 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen, iXLen)
1709 define void @test_sf_vc_ivv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
1710 ; CHECK-LABEL: test_sf_vc_ivv_se_e16m2:
1711 ; CHECK: # %bb.0: # %entry
1712 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
1713 ; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10
1716 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
1720 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen, iXLen)
1722 define void @test_sf_vc_ivv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
1723 ; CHECK-LABEL: test_sf_vc_ivv_se_e16m4:
1724 ; CHECK: # %bb.0: # %entry
1725 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
1726 ; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10
1729 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
1733 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen, iXLen)
1735 define void @test_sf_vc_ivv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen %vl) {
1736 ; CHECK-LABEL: test_sf_vc_ivv_se_e16m8:
1737 ; CHECK: # %bb.0: # %entry
1738 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
1739 ; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 10
1742 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen 10, iXLen %vl)
1746 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen, iXLen)
1748 define void @test_sf_vc_ivv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
1749 ; CHECK-LABEL: test_sf_vc_ivv_se_e32mf2:
1750 ; CHECK: # %bb.0: # %entry
1751 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
1752 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
1755 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
1759 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen, iXLen)
1761 define void @test_sf_vc_ivv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
1762 ; CHECK-LABEL: test_sf_vc_ivv_se_e32m1:
1763 ; CHECK: # %bb.0: # %entry
1764 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
1765 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
1768 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
1772 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen, iXLen)
1774 define void @test_sf_vc_ivv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
1775 ; CHECK-LABEL: test_sf_vc_ivv_se_e32m2:
1776 ; CHECK: # %bb.0: # %entry
1777 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
1778 ; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10
1781 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
1785 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen, iXLen)
1787 define void @test_sf_vc_ivv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
1788 ; CHECK-LABEL: test_sf_vc_ivv_se_e32m4:
1789 ; CHECK: # %bb.0: # %entry
1790 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
1791 ; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10
1794 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
1798 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen, iXLen)
1800 define void @test_sf_vc_ivv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen %vl) {
1801 ; CHECK-LABEL: test_sf_vc_ivv_se_e32m8:
1802 ; CHECK: # %bb.0: # %entry
1803 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
1804 ; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 10
1807 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen 10, iXLen %vl)
1811 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen, iXLen)
1813 define void @test_sf_vc_ivv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen %vl) {
1814 ; CHECK-LABEL: test_sf_vc_ivv_se_e64m1:
1815 ; CHECK: # %bb.0: # %entry
1816 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1817 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
1820 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen 10, iXLen %vl)
1824 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen)
1826 define void @test_sf_vc_ivv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen %vl) {
1827 ; CHECK-LABEL: test_sf_vc_ivv_se_e64m2:
1828 ; CHECK: # %bb.0: # %entry
1829 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1830 ; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10
1833 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen 10, iXLen %vl)
1837 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen, iXLen)
1839 define void @test_sf_vc_ivv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen %vl) {
1840 ; CHECK-LABEL: test_sf_vc_ivv_se_e64m4:
1841 ; CHECK: # %bb.0: # %entry
1842 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1843 ; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10
1846 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen 10, iXLen %vl)
1850 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen, iXLen)
1852 define void @test_sf_vc_ivv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen %vl) {
1853 ; CHECK-LABEL: test_sf_vc_ivv_se_e64m8:
1854 ; CHECK: # %bb.0: # %entry
1855 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1856 ; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 10
1859 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen 10, iXLen %vl)
1863 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen, iXLen)
; sf.vc.v.ivv.se tests (5-bit immediate operand, side-effecting form that
; also returns the destination vector): one test per SEW/LMUL from e8mf8
; through e16m4 in this span. Each checks for a vsetvli with the matching
; SEW/LMUL followed by a single sf.vc.v.ivv with immediate 10. Assertions
; are autogenerated (see header); do not hand-edit CHECK lines.
1865 define <vscale x 1 x i8> @test_sf_vc_v_ivv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
1866 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf8:
1867 ; CHECK: # %bb.0: # %entry
1868 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
1869 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
1872 %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
1873 ret <vscale x 1 x i8> %0
1876 declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, iXLen)
1878 define <vscale x 2 x i8> @test_sf_vc_v_ivv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
1879 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf4:
1880 ; CHECK: # %bb.0: # %entry
1881 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
1882 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
1885 %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
1886 ret <vscale x 2 x i8> %0
1889 declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen, iXLen)
1891 define <vscale x 4 x i8> @test_sf_vc_v_ivv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
1892 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf2:
1893 ; CHECK: # %bb.0: # %entry
1894 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
1895 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
1898 %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
1899 ret <vscale x 4 x i8> %0
1902 declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen, iXLen)
1904 define <vscale x 8 x i8> @test_sf_vc_v_ivv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
1905 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m1:
1906 ; CHECK: # %bb.0: # %entry
1907 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
1908 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
1911 %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
1912 ret <vscale x 8 x i8> %0
1915 declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen, iXLen)
1917 define <vscale x 16 x i8> @test_sf_vc_v_ivv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
1918 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m2:
1919 ; CHECK: # %bb.0: # %entry
1920 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
1921 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
1924 %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
1925 ret <vscale x 16 x i8> %0
1928 declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen, iXLen)
1930 define <vscale x 32 x i8> @test_sf_vc_v_ivv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
1931 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m4:
1932 ; CHECK: # %bb.0: # %entry
1933 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
1934 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
1937 %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
1938 ret <vscale x 32 x i8> %0
1941 declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen, iXLen)
1943 define <vscale x 64 x i8> @test_sf_vc_v_ivv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen %vl) {
1944 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m8:
1945 ; CHECK: # %bb.0: # %entry
1946 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
1947 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
1950 %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen 10, iXLen %vl)
1951 ret <vscale x 64 x i8> %0
1954 declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen)
1956 define <vscale x 1 x i16> @test_sf_vc_v_ivv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
1957 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16mf4:
1958 ; CHECK: # %bb.0: # %entry
1959 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
1960 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
1963 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
1964 ret <vscale x 1 x i16> %0
1967 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen, iXLen)
1969 define <vscale x 2 x i16> @test_sf_vc_v_ivv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
1970 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16mf2:
1971 ; CHECK: # %bb.0: # %entry
1972 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
1973 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
1976 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
1977 ret <vscale x 2 x i16> %0
1980 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen, iXLen)
1982 define <vscale x 4 x i16> @test_sf_vc_v_ivv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
1983 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m1:
1984 ; CHECK: # %bb.0: # %entry
1985 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
1986 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
1989 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
1990 ret <vscale x 4 x i16> %0
1993 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen, iXLen)
1995 define <vscale x 8 x i16> @test_sf_vc_v_ivv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
1996 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m2:
1997 ; CHECK: # %bb.0: # %entry
1998 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
1999 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
2002 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
2003 ret <vscale x 8 x i16> %0
2006 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen, iXLen)
2008 define <vscale x 16 x i16> @test_sf_vc_v_ivv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
2009 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m4:
2010 ; CHECK: # %bb.0: # %entry
2011 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
2012 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
2015 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
2016 ret <vscale x 16 x i16> %0
2019 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen, iXLen)
2021 define <vscale x 32 x i16> @test_sf_vc_v_ivv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen %vl) {
2022 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m8:
2023 ; CHECK: # %bb.0: # %entry
2024 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
2025 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
2028 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen 10, iXLen %vl)
2029 ret <vscale x 32 x i16> %0
2032 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen, iXLen)
2034 define <vscale x 1 x i32> @test_sf_vc_v_ivv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
2035 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32mf2:
2036 ; CHECK: # %bb.0: # %entry
2037 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
2038 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2041 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
2042 ret <vscale x 1 x i32> %0
2045 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen, iXLen)
2047 define <vscale x 2 x i32> @test_sf_vc_v_ivv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
2048 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m1:
2049 ; CHECK: # %bb.0: # %entry
2050 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
2051 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2054 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
2055 ret <vscale x 2 x i32> %0
2058 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen, iXLen)
2060 define <vscale x 4 x i32> @test_sf_vc_v_ivv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
2061 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m2:
2062 ; CHECK: # %bb.0: # %entry
2063 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
2064 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
2067 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
2068 ret <vscale x 4 x i32> %0
2071 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen, iXLen)
2073 define <vscale x 8 x i32> @test_sf_vc_v_ivv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
2074 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m4:
2075 ; CHECK: # %bb.0: # %entry
2076 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
2077 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
2080 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
2081 ret <vscale x 8 x i32> %0
2084 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen, iXLen)
2086 define <vscale x 16 x i32> @test_sf_vc_v_ivv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen %vl) {
2087 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m8:
2088 ; CHECK: # %bb.0: # %entry
2089 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
2090 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
2093 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen 10, iXLen %vl)
2094 ret <vscale x 16 x i32> %0
2097 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen, iXLen)
2099 define <vscale x 1 x i64> @test_sf_vc_v_ivv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen %vl) {
2100 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m1:
2101 ; CHECK: # %bb.0: # %entry
2102 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
2103 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2106 %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen 10, iXLen %vl)
2107 ret <vscale x 1 x i64> %0
2110 declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen)
2112 define <vscale x 2 x i64> @test_sf_vc_v_ivv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen %vl) {
2113 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m2:
2114 ; CHECK: # %bb.0: # %entry
2115 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
2116 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
2119 %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen 10, iXLen %vl)
2120 ret <vscale x 2 x i64> %0
2123 declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen, iXLen)
2125 define <vscale x 4 x i64> @test_sf_vc_v_ivv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen %vl) {
2126 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m4:
2127 ; CHECK: # %bb.0: # %entry
2128 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
2129 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
2132 %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen 10, iXLen %vl)
2133 ret <vscale x 4 x i64> %0
2136 declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen, iXLen)
2138 define <vscale x 8 x i64> @test_sf_vc_v_ivv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen %vl) {
2139 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m8:
2140 ; CHECK: # %bb.0: # %entry
2141 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
2142 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
2145 %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen 10, iXLen %vl)
2146 ret <vscale x 8 x i64> %0
2149 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen, iXLen)
2151 define <vscale x 1 x i8> @test_sf_vc_v_ivv_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
2152 ; CHECK-LABEL: test_sf_vc_v_ivv_e8mf8:
2153 ; CHECK: # %bb.0: # %entry
2154 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
2155 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2158 %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
2159 ret <vscale x 1 x i8> %0
2162 declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.nxv1i8.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, iXLen)
2164 define <vscale x 2 x i8> @test_sf_vc_v_ivv_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
2165 ; CHECK-LABEL: test_sf_vc_v_ivv_e8mf4:
2166 ; CHECK: # %bb.0: # %entry
2167 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
2168 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2171 %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
2172 ret <vscale x 2 x i8> %0
2175 declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.nxv2i8.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen, iXLen)
2177 define <vscale x 4 x i8> @test_sf_vc_v_ivv_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
2178 ; CHECK-LABEL: test_sf_vc_v_ivv_e8mf2:
2179 ; CHECK: # %bb.0: # %entry
2180 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
2181 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2184 %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
2185 ret <vscale x 4 x i8> %0
2188 declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.nxv4i8.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen, iXLen)
2190 define <vscale x 8 x i8> @test_sf_vc_v_ivv_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
2191 ; CHECK-LABEL: test_sf_vc_v_ivv_e8m1:
2192 ; CHECK: # %bb.0: # %entry
2193 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
2194 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2197 %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
2198 ret <vscale x 8 x i8> %0
2201 declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.nxv8i8.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen, iXLen)
2203 define <vscale x 16 x i8> @test_sf_vc_v_ivv_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
2204 ; CHECK-LABEL: test_sf_vc_v_ivv_e8m2:
2205 ; CHECK: # %bb.0: # %entry
2206 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
2207 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
2210 %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
2211 ret <vscale x 16 x i8> %0
2214 declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.nxv16i8.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen, iXLen)
2216 define <vscale x 32 x i8> @test_sf_vc_v_ivv_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
2217 ; CHECK-LABEL: test_sf_vc_v_ivv_e8m4:
2218 ; CHECK: # %bb.0: # %entry
2219 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
2220 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
2223 %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
2224 ret <vscale x 32 x i8> %0
2227 declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.nxv32i8.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen, iXLen)
2229 define <vscale x 64 x i8> @test_sf_vc_v_ivv_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen %vl) {
2230 ; CHECK-LABEL: test_sf_vc_v_ivv_e8m8:
2231 ; CHECK: # %bb.0: # %entry
2232 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
2233 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
2236 %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen 10, iXLen %vl)
2237 ret <vscale x 64 x i8> %0
2240 declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.nxv64i8.iXLen.iXLen.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen)
2242 define <vscale x 1 x i16> @test_sf_vc_v_ivv_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
2243 ; CHECK-LABEL: test_sf_vc_v_ivv_e16mf4:
2244 ; CHECK: # %bb.0: # %entry
2245 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
2246 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2249 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
2250 ret <vscale x 1 x i16> %0
2253 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.nxv1i16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen, iXLen)
2255 define <vscale x 2 x i16> @test_sf_vc_v_ivv_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
2256 ; CHECK-LABEL: test_sf_vc_v_ivv_e16mf2:
2257 ; CHECK: # %bb.0: # %entry
2258 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
2259 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2262 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
2263 ret <vscale x 2 x i16> %0
2266 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.nxv2i16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen, iXLen)
2268 define <vscale x 4 x i16> @test_sf_vc_v_ivv_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
2269 ; CHECK-LABEL: test_sf_vc_v_ivv_e16m1:
2270 ; CHECK: # %bb.0: # %entry
2271 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
2272 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2275 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
2276 ret <vscale x 4 x i16> %0
2279 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.nxv4i16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen, iXLen)
2281 define <vscale x 8 x i16> @test_sf_vc_v_ivv_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
2282 ; CHECK-LABEL: test_sf_vc_v_ivv_e16m2:
2283 ; CHECK: # %bb.0: # %entry
2284 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
2285 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
2288 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
2289 ret <vscale x 8 x i16> %0
2292 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.nxv8i16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen, iXLen)
2294 define <vscale x 16 x i16> @test_sf_vc_v_ivv_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
2295 ; CHECK-LABEL: test_sf_vc_v_ivv_e16m4:
2296 ; CHECK: # %bb.0: # %entry
2297 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
2298 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
2301 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
2302 ret <vscale x 16 x i16> %0
2305 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.nxv16i16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen, iXLen)
2307 define <vscale x 32 x i16> @test_sf_vc_v_ivv_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen %vl) {
2308 ; CHECK-LABEL: test_sf_vc_v_ivv_e16m8:
2309 ; CHECK: # %bb.0: # %entry
2310 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
2311 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
2314 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen 10, iXLen %vl)
2315 ret <vscale x 32 x i16> %0
2318 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.nxv32i16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen, iXLen)
2320 define <vscale x 1 x i32> @test_sf_vc_v_ivv_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
2321 ; CHECK-LABEL: test_sf_vc_v_ivv_e32mf2:
2322 ; CHECK: # %bb.0: # %entry
2323 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
2324 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2327 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
2328 ret <vscale x 1 x i32> %0
2331 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.nxv1i32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen, iXLen)
2333 define <vscale x 2 x i32> @test_sf_vc_v_ivv_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
2334 ; CHECK-LABEL: test_sf_vc_v_ivv_e32m1:
2335 ; CHECK: # %bb.0: # %entry
2336 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
2337 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2340 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
2341 ret <vscale x 2 x i32> %0
2344 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.nxv2i32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen, iXLen)
2346 define <vscale x 4 x i32> @test_sf_vc_v_ivv_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
2347 ; CHECK-LABEL: test_sf_vc_v_ivv_e32m2:
2348 ; CHECK: # %bb.0: # %entry
2349 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
2350 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
2353 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
2354 ret <vscale x 4 x i32> %0
2357 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.nxv4i32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen, iXLen)
2359 define <vscale x 8 x i32> @test_sf_vc_v_ivv_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
2360 ; CHECK-LABEL: test_sf_vc_v_ivv_e32m4:
2361 ; CHECK: # %bb.0: # %entry
2362 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
2363 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
2366 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
2367 ret <vscale x 8 x i32> %0
2370 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.nxv8i32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen, iXLen)
2372 define <vscale x 16 x i32> @test_sf_vc_v_ivv_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen %vl) {
2373 ; CHECK-LABEL: test_sf_vc_v_ivv_e32m8:
2374 ; CHECK: # %bb.0: # %entry
2375 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
2376 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
2379 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen 10, iXLen %vl)
2380 ret <vscale x 16 x i32> %0
2383 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.nxv16i32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen, iXLen)
2385 define <vscale x 1 x i64> @test_sf_vc_v_ivv_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen %vl) {
2386 ; CHECK-LABEL: test_sf_vc_v_ivv_e64m1:
2387 ; CHECK: # %bb.0: # %entry
2388 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
2389 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2392 %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen 10, iXLen %vl)
2393 ret <vscale x 1 x i64> %0
2396 declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.nxv1i64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen)
2398 define <vscale x 2 x i64> @test_sf_vc_v_ivv_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen %vl) {
2399 ; CHECK-LABEL: test_sf_vc_v_ivv_e64m2:
2400 ; CHECK: # %bb.0: # %entry
2401 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
2402 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
2405 %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen 10, iXLen %vl)
2406 ret <vscale x 2 x i64> %0
2409 declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.nxv2i64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen, iXLen)
2411 define <vscale x 4 x i64> @test_sf_vc_v_ivv_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen %vl) {
2412 ; CHECK-LABEL: test_sf_vc_v_ivv_e64m4:
2413 ; CHECK: # %bb.0: # %entry
2414 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
2415 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
2418 %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen 10, iXLen %vl)
2419 ret <vscale x 4 x i64> %0
2422 declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.nxv4i64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen, iXLen)
2424 define <vscale x 8 x i64> @test_sf_vc_v_ivv_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen %vl) {
2425 ; CHECK-LABEL: test_sf_vc_v_ivv_e64m8:
2426 ; CHECK: # %bb.0: # %entry
2427 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
2428 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
2431 %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen 10, iXLen %vl)
2432 ret <vscale x 8 x i64> %0
2435 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen, iXLen)
2437 define void @test_sf_vc_fvvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
2438 ; CHECK-LABEL: test_sf_vc_fvvv_se_e16mf4:
2439 ; CHECK: # %bb.0: # %entry
2440 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
2441 ; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
2444 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
2448 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
2450 define <vscale x 1 x half> @test_sf_vc_fv_fvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
2451 ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16mf4:
2452 ; CHECK: # %bb.0: # %entry
2453 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
2454 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
2457 %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
2458 ret <vscale x 1 x half> %0
2461 declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv1f16.nxv1i16.nxv1i16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
2463 define void @test_sf_vc_fvvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
2464 ; CHECK-LABEL: test_sf_vc_fvvv_se_e16mf2:
2465 ; CHECK: # %bb.0: # %entry
2466 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
2467 ; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
2470 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
2474 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
2476 define <vscale x 2 x half> @test_sf_vc_fv_fvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
2477 ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16mf2:
2478 ; CHECK: # %bb.0: # %entry
2479 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
2480 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
2483 %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
2484 ret <vscale x 2 x half> %0
2487 declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv2f16.nxv2i16.nxv2i16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
2489 define void @test_sf_vc_fvvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
2490 ; CHECK-LABEL: test_sf_vc_fvvv_se_e16m1:
2491 ; CHECK: # %bb.0: # %entry
2492 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
2493 ; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
2496 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
2500 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
2502 define <vscale x 4 x half> @test_sf_vc_fv_fvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
2503 ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m1:
2504 ; CHECK: # %bb.0: # %entry
2505 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
2506 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
2509 %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
2510 ret <vscale x 4 x half> %0
2513 declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv4f16.nxv4i16.nxv4i16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
2515 define void @test_sf_vc_fvvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
2516 ; CHECK-LABEL: test_sf_vc_fvvv_se_e16m2:
2517 ; CHECK: # %bb.0: # %entry
2518 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
2519 ; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12
2522 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
2526 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
2528 define <vscale x 8 x half> @test_sf_vc_fv_fvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
2529 ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m2:
2530 ; CHECK: # %bb.0: # %entry
2531 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
2532 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
2535 %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
2536 ret <vscale x 8 x half> %0
2539 declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv8f16.nxv8i16.nxv8i16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
2541 define void @test_sf_vc_fvvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
2542 ; CHECK-LABEL: test_sf_vc_fvvv_se_e16m4:
2543 ; CHECK: # %bb.0: # %entry
2544 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
2545 ; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16
2548 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
2552 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
2554 define <vscale x 16 x half> @test_sf_vc_fv_fvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
2555 ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m4:
2556 ; CHECK: # %bb.0: # %entry
2557 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
2558 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
2561 %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
2562 ret <vscale x 16 x half> %0
2565 declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv16f16.nxv16i16.nxv16i16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
2567 define void @test_sf_vc_fvvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
2568 ; CHECK-LABEL: test_sf_vc_fvvv_se_e16m8:
2569 ; CHECK: # %bb.0: # %entry
2570 ; CHECK-NEXT: vl8re16.v v24, (a0)
2571 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
2572 ; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24
2575 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
2579 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
2581 define <vscale x 32 x half> @test_sf_vc_fv_fvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
2582 ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e16m8:
2583 ; CHECK: # %bb.0: # %entry
2584 ; CHECK-NEXT: vl8re16.v v24, (a0)
2585 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
2586 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
2589 %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
2590 ret <vscale x 32 x half> %0
2593 declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv32f16.nxv32i16.nxv32i16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
2595 define void @test_sf_vc_fvvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
2596 ; CHECK-LABEL: test_sf_vc_fvvv_se_e32mf2:
2597 ; CHECK: # %bb.0: # %entry
2598 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
2599 ; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
2602 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
2606 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
2608 define <vscale x 1 x float> @test_sf_vc_fv_fvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
2609 ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32mf2:
2610 ; CHECK: # %bb.0: # %entry
2611 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
2612 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
2615 %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
2616 ret <vscale x 1 x float> %0
2619 declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv1f32.nxv1i32.nxv1i32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
2621 define void @test_sf_vc_fvvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
2622 ; CHECK-LABEL: test_sf_vc_fvvv_se_e32m1:
2623 ; CHECK: # %bb.0: # %entry
2624 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
2625 ; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
2628 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
2632 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
2634 define <vscale x 2 x float> @test_sf_vc_fv_fvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
2635 ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m1:
2636 ; CHECK: # %bb.0: # %entry
2637 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
2638 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
2641 %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
2642 ret <vscale x 2 x float> %0
2645 declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv2f32.nxv2i32.nxv2i32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
2647 define void @test_sf_vc_fvvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
2648 ; CHECK-LABEL: test_sf_vc_fvvv_se_e32m2:
2649 ; CHECK: # %bb.0: # %entry
2650 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
2651 ; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12
2654 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
2658 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
2660 define <vscale x 4 x float> @test_sf_vc_fv_fvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
2661 ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m2:
2662 ; CHECK: # %bb.0: # %entry
2663 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
2664 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
2667 %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
2668 ret <vscale x 4 x float> %0
2671 declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.nxv4i32.nxv4i32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
2673 define void @test_sf_vc_fvvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
2674 ; CHECK-LABEL: test_sf_vc_fvvv_se_e32m4:
2675 ; CHECK: # %bb.0: # %entry
2676 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
2677 ; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16
2680 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
2684 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
2686 define <vscale x 8 x float> @test_sf_vc_fv_fvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
2687 ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m4:
2688 ; CHECK: # %bb.0: # %entry
2689 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
2690 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
2693 %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
2694 ret <vscale x 8 x float> %0
2697 declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv8f32.nxv8i32.nxv8i32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
2699 define void @test_sf_vc_fvvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
2700 ; CHECK-LABEL: test_sf_vc_fvvv_se_e32m8:
2701 ; CHECK: # %bb.0: # %entry
2702 ; CHECK-NEXT: vl8re32.v v24, (a0)
2703 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
2704 ; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24
2707 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
2711 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
2713 define <vscale x 16 x float> @test_sf_vc_fv_fvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
2714 ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e32m8:
2715 ; CHECK: # %bb.0: # %entry
2716 ; CHECK-NEXT: vl8re32.v v24, (a0)
2717 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
2718 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
2721 %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
2722 ret <vscale x 16 x float> %0
2725 declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv16f32.nxv16i32.nxv16i32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
2727 define void @test_sf_vc_fvvv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
2728 ; CHECK-LABEL: test_sf_vc_fvvv_se_e64m1:
2729 ; CHECK: # %bb.0: # %entry
2730 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
2731 ; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
2734 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
2738 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
2740 define <vscale x 1 x double> @test_sf_vc_fv_fvv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
2741 ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m1:
2742 ; CHECK: # %bb.0: # %entry
2743 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
2744 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
2747 %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
2748 ret <vscale x 1 x double> %0
2751 declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv1f64.nxv1i64.nxv1i64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
2753 define void @test_sf_vc_fvvv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
2754 ; CHECK-LABEL: test_sf_vc_fvvv_se_e64m2:
2755 ; CHECK: # %bb.0: # %entry
2756 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
2757 ; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12
2760 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
2764 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
2766 define <vscale x 2 x double> @test_sf_vc_fv_fvv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
2767 ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m2:
2768 ; CHECK: # %bb.0: # %entry
2769 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
2770 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
2773 %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
2774 ret <vscale x 2 x double> %0
2777 declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv2f64.nxv2i64.nxv2i64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
2779 define void @test_sf_vc_fvvv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
2780 ; CHECK-LABEL: test_sf_vc_fvvv_se_e64m4:
2781 ; CHECK: # %bb.0: # %entry
2782 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
2783 ; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16
2786 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
2790 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
2792 define <vscale x 4 x double> @test_sf_vc_fv_fvv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
2793 ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m4:
2794 ; CHECK: # %bb.0: # %entry
2795 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
2796 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
2799 %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
2800 ret <vscale x 4 x double> %0
2803 declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv4f64.nxv4i64.nxv4i64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
2805 define void @test_sf_vc_fvvv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
2806 ; CHECK-LABEL: test_sf_vc_fvvv_se_e64m8:
2807 ; CHECK: # %bb.0: # %entry
2808 ; CHECK-NEXT: vl8re64.v v24, (a0)
2809 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
2810 ; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24
2813 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
2817 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
2819 define <vscale x 8 x double> @test_sf_vc_fv_fvv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
2820 ; CHECK-LABEL: test_sf_vc_fv_fvv_se_e64m8:
2821 ; CHECK: # %bb.0: # %entry
2822 ; CHECK-NEXT: vl8re64.v v24, (a0)
2823 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
2824 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
2827 %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
2828 ret <vscale x 8 x double> %0
2831 declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv8f64.nxv8i64.nxv8i64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
2833 define void @test_sf_vc_fvvx_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl) {
2834 ; CHECK-LABEL: test_sf_vc_fvvx_se_e16mf4:
2835 ; CHECK: # %bb.0: # %entry
2836 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2837 ; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
2840 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f16.nxv1i16.i16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
2844 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f16.nxv1i16.i16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x i16>, i16, iXLen)
2846 define <vscale x 1 x half> @test_sf_vc_v_fvvx_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl) {
2847 ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16mf4:
2848 ; CHECK: # %bb.0: # %entry
2849 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2850 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
2853 %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv1f16.nxv1f16.nxv1i16.i16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
2854 ret <vscale x 1 x half> %0
2857 declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv1f16.nxv1f16.nxv1i16.i16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x i16>, i16, iXLen)
2859 define void @test_sf_vc_fvvx_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl) {
2860 ; CHECK-LABEL: test_sf_vc_fvvx_se_e16mf2:
2861 ; CHECK: # %bb.0: # %entry
2862 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2863 ; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
2866 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f16.nxv2i16.i16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
2870 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f16.nxv2i16.i16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x i16>, i16, iXLen)
2872 define <vscale x 2 x half> @test_sf_vc_v_fvvx_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl) {
2873 ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16mf2:
2874 ; CHECK: # %bb.0: # %entry
2875 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2876 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
2879 %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv2f16.nxv2f16.nxv2i16.i16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
2880 ret <vscale x 2 x half> %0
2883 declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv2f16.nxv2f16.nxv2i16.i16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x i16>, i16, iXLen)
2885 define void @test_sf_vc_fvvx_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl) {
2886 ; CHECK-LABEL: test_sf_vc_fvvx_se_e16m1:
2887 ; CHECK: # %bb.0: # %entry
2888 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2889 ; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
2892 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f16.nxv4i16.i16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
2896 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f16.nxv4i16.i16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x i16>, i16, iXLen)
2898 define <vscale x 4 x half> @test_sf_vc_v_fvvx_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl) {
2899 ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m1:
2900 ; CHECK: # %bb.0: # %entry
2901 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2902 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
2905 %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv4f16.nxv4f16.nxv4i16.i16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
2906 ret <vscale x 4 x half> %0
2909 declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv4f16.nxv4f16.nxv4i16.i16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x i16>, i16, iXLen)
2911 define void @test_sf_vc_fvvx_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl) {
2912 ; CHECK-LABEL: test_sf_vc_fvvx_se_e16m2:
2913 ; CHECK: # %bb.0: # %entry
2914 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
2915 ; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0
2918 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f16.nxv8i16.i16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
2922 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f16.nxv8i16.i16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x i16>, i16, iXLen)
2924 define <vscale x 8 x half> @test_sf_vc_v_fvvx_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl) {
2925 ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m2:
2926 ; CHECK: # %bb.0: # %entry
2927 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
2928 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
2931 %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv8f16.nxv8f16.nxv8i16.i16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
2932 ret <vscale x 8 x half> %0
2935 declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv8f16.nxv8f16.nxv8i16.i16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x i16>, i16, iXLen)
2937 define void @test_sf_vc_fvvx_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl) {
2938 ; CHECK-LABEL: test_sf_vc_fvvx_se_e16m4:
2939 ; CHECK: # %bb.0: # %entry
2940 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
2941 ; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0
2944 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f16.nxv16i16.i16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
2948 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f16.nxv16i16.i16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x i16>, i16, iXLen)
2950 define <vscale x 16 x half> @test_sf_vc_v_fvvx_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl) {
2951 ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m4:
2952 ; CHECK: # %bb.0: # %entry
2953 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
2954 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
2957 %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv16f16.nxv16f16.nxv16i16.i16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
2958 ret <vscale x 16 x half> %0
2961 declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv16f16.nxv16f16.nxv16i16.i16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x i16>, i16, iXLen)
2963 define void @test_sf_vc_fvvx_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl) {
2964 ; CHECK-LABEL: test_sf_vc_fvvx_se_e16m8:
2965 ; CHECK: # %bb.0: # %entry
2966 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
2967 ; CHECK-NEXT: sf.vc.xvv 3, v8, v16, a0
2970 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32f16.nxv32i16.i16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
2974 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32f16.nxv32i16.i16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x i16>, i16, iXLen)
2976 define <vscale x 32 x half> @test_sf_vc_v_fvvx_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl) {
2977 ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e16m8:
2978 ; CHECK: # %bb.0: # %entry
2979 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
2980 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
2983 %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv32f16.nxv32f16.nxv32i16.i16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
2984 ret <vscale x 32 x half> %0
2987 declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv32f16.nxv32f16.nxv32i16.i16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x i16>, i16, iXLen)
2989 define void @test_sf_vc_fvvx_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl) {
2990 ; CHECK-LABEL: test_sf_vc_fvvx_se_e32mf2:
2991 ; CHECK: # %bb.0: # %entry
2992 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
2993 ; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
2996 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f32.nxv1i32.i32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
3000 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f32.nxv1i32.i32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i32>, i32, iXLen)
3002 define <vscale x 1 x float> @test_sf_vc_v_fvvx_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl) {
3003 ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32mf2:
3004 ; CHECK: # %bb.0: # %entry
3005 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3006 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
3009 %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv1f32.nxv1f32.nxv1i32.i32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
3010 ret <vscale x 1 x float> %0
3013 declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv1f32.nxv1f32.nxv1i32.i32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i32>, i32, iXLen)
3015 define void @test_sf_vc_fvvx_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl) {
3016 ; CHECK-LABEL: test_sf_vc_fvvx_se_e32m1:
3017 ; CHECK: # %bb.0: # %entry
3018 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
3019 ; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
3022 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f32.nxv2i32.i32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
3026 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f32.nxv2i32.i32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i32>, i32, iXLen)
3028 define <vscale x 2 x float> @test_sf_vc_v_fvvx_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl) {
3029 ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m1:
3030 ; CHECK: # %bb.0: # %entry
3031 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
3032 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
3035 %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv2f32.nxv2f32.nxv2i32.i32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
3036 ret <vscale x 2 x float> %0
3039 declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv2f32.nxv2f32.nxv2i32.i32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i32>, i32, iXLen)
3041 define void @test_sf_vc_fvvx_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl) {
3042 ; CHECK-LABEL: test_sf_vc_fvvx_se_e32m2:
3043 ; CHECK: # %bb.0: # %entry
3044 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
3045 ; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0
3048 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f32.nxv4i32.i32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
3052 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f32.nxv4i32.i32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i32>, i32, iXLen)
3054 define <vscale x 4 x float> @test_sf_vc_v_fvvx_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl) {
3055 ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m2:
3056 ; CHECK: # %bb.0: # %entry
3057 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
3058 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
3061 %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.nxv4f32.nxv4i32.i32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
3062 ret <vscale x 4 x float> %0
3065 declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.nxv4f32.nxv4i32.i32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i32>, i32, iXLen)
3067 define void @test_sf_vc_fvvx_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl) {
3068 ; CHECK-LABEL: test_sf_vc_fvvx_se_e32m4:
3069 ; CHECK: # %bb.0: # %entry
3070 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
3071 ; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0
3074 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f32.nxv8i32.i32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
3078 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f32.nxv8i32.i32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i32>, i32, iXLen)
3080 define <vscale x 8 x float> @test_sf_vc_v_fvvx_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl) {
3081 ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m4:
3082 ; CHECK: # %bb.0: # %entry
3083 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
3084 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
3087 %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv8f32.nxv8f32.nxv8i32.i32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
3088 ret <vscale x 8 x float> %0
3091 declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv8f32.nxv8f32.nxv8i32.i32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i32>, i32, iXLen)
3093 define void @test_sf_vc_fvvx_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl) {
3094 ; CHECK-LABEL: test_sf_vc_fvvx_se_e32m8:
3095 ; CHECK: # %bb.0: # %entry
3096 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
3097 ; CHECK-NEXT: sf.vc.xvv 3, v8, v16, a0
3100 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f32.nxv16i32.i32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
3104 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f32.nxv16i32.i32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i32>, i32, iXLen)
3106 define <vscale x 16 x float> @test_sf_vc_v_fvvx_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl) {
3107 ; CHECK-LABEL: test_sf_vc_v_fvvx_se_e32m8:
3108 ; CHECK: # %bb.0: # %entry
3109 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
3110 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
3113 %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv16f32.nxv16f32.nxv16i32.i32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
3114 ret <vscale x 16 x float> %0
3117 declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv16f32.nxv16f32.nxv16i32.i32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i32>, i32, iXLen)
3119 define void @test_sf_vc_fvvi_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
3120 ; CHECK-LABEL: test_sf_vc_fvvi_se_e16mf4:
3121 ; CHECK: # %bb.0: # %entry
3122 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
3123 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 3
3126 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f16.nxv1i16.iXLen.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, iXLen 3, iXLen %vl)
3130 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x i16>, iXLen, iXLen)
3132 define <vscale x 1 x half> @test_sf_vc_fv_fvvi_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
3133 ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16mf4:
3134 ; CHECK: # %bb.0: # %entry
3135 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
3136 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 3
3139 %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv1f16.nxv1f16.nxv1i16.iXLen.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, iXLen 3, iXLen %vl)
3140 ret <vscale x 1 x half> %0
3143 declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv1f16.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x i16>, iXLen, iXLen)
3145 define void @test_sf_vc_fvvi_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
3146 ; CHECK-LABEL: test_sf_vc_fvvi_se_e16mf2:
3147 ; CHECK: # %bb.0: # %entry
3148 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
3149 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 3
3152 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f16.nxv2i16.iXLen.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, iXLen 3, iXLen %vl)
3156 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x i16>, iXLen, iXLen)
3158 define <vscale x 2 x half> @test_sf_vc_fv_fvvi_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
3159 ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16mf2:
3160 ; CHECK: # %bb.0: # %entry
3161 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
3162 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 3
3165 %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv2f16.nxv2f16.nxv2i16.iXLen.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, iXLen 3, iXLen %vl)
3166 ret <vscale x 2 x half> %0
3169 declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv2f16.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x i16>, iXLen, iXLen)
3171 define void @test_sf_vc_fvvi_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
3172 ; CHECK-LABEL: test_sf_vc_fvvi_se_e16m1:
3173 ; CHECK: # %bb.0: # %entry
3174 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
3175 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 3
3178 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f16.nxv4i16.iXLen.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, iXLen 3, iXLen %vl)
3182 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x i16>, iXLen, iXLen)
3184 define <vscale x 4 x half> @test_sf_vc_fv_fvvi_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
3185 ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m1:
3186 ; CHECK: # %bb.0: # %entry
3187 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
3188 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 3
3191 %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv4f16.nxv4f16.nxv4i16.iXLen.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, iXLen 3, iXLen %vl)
3192 ret <vscale x 4 x half> %0
3195 declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv4f16.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x i16>, iXLen, iXLen)
3197 define void @test_sf_vc_fvvi_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
3198 ; CHECK-LABEL: test_sf_vc_fvvi_se_e16m2:
3199 ; CHECK: # %bb.0: # %entry
3200 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
3201 ; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 3
3204 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f16.nxv8i16.iXLen.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, iXLen 3, iXLen %vl)
3208 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x i16>, iXLen, iXLen)
3210 define <vscale x 8 x half> @test_sf_vc_fv_fvvi_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
3211 ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m2:
3212 ; CHECK: # %bb.0: # %entry
3213 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
3214 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 3
3217 %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv8f16.nxv8f16.nxv8i16.iXLen.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, iXLen 3, iXLen %vl)
3218 ret <vscale x 8 x half> %0
3221 declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv8f16.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x i16>, iXLen, iXLen)
3223 define void @test_sf_vc_fvvi_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
3224 ; CHECK-LABEL: test_sf_vc_fvvi_se_e16m4:
3225 ; CHECK: # %bb.0: # %entry
3226 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
3227 ; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 3
3230 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f16.nxv16i16.iXLen.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, iXLen 3, iXLen %vl)
3234 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x i16>, iXLen, iXLen)
3236 define <vscale x 16 x half> @test_sf_vc_fv_fvvi_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
3237 ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m4:
3238 ; CHECK: # %bb.0: # %entry
3239 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
3240 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 3
3243 %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv16f16.nxv16f16.nxv16i16.iXLen.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, iXLen 3, iXLen %vl)
3244 ret <vscale x 16 x half> %0
3247 declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv16f16.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x i16>, iXLen, iXLen)
3249 define void @test_sf_vc_fvvi_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, iXLen %vl) {
3250 ; CHECK-LABEL: test_sf_vc_fvvi_se_e16m8:
3251 ; CHECK: # %bb.0: # %entry
3252 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
3253 ; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 3
3256 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32f16.nxv32i16.iXLen.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, iXLen 3, iXLen %vl)
3260 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32f16.nxv32i16.iXLen.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x i16>, iXLen, iXLen)
3262 define <vscale x 32 x half> @test_sf_vc_fv_fvvi_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, iXLen %vl) {
3263 ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e16m8:
3264 ; CHECK: # %bb.0: # %entry
3265 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
3266 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 3
3269 %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv32f16.nxv32f16.nxv32i16.iXLen.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, iXLen 3, iXLen %vl)
3270 ret <vscale x 32 x half> %0
3273 declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv32f16.nxv32f16.nxv32i16.iXLen.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x i16>, iXLen, iXLen)
3275 define void @test_sf_vc_fvvi_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
3276 ; CHECK-LABEL: test_sf_vc_fvvi_se_e32mf2:
3277 ; CHECK: # %bb.0: # %entry
3278 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
3279 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 3
3282 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f32.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, iXLen 3, iXLen %vl)
3286 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i32>, iXLen, iXLen)
3288 define <vscale x 1 x float> @test_sf_vc_fv_fvvi_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
3289 ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32mf2:
3290 ; CHECK: # %bb.0: # %entry
3291 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
3292 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 3
3295 %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv1f32.nxv1f32.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, iXLen 3, iXLen %vl)
3296 ret <vscale x 1 x float> %0
3299 declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv1f32.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i32>, iXLen, iXLen)
3301 define void @test_sf_vc_fvvi_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
3302 ; CHECK-LABEL: test_sf_vc_fvvi_se_e32m1:
3303 ; CHECK: # %bb.0: # %entry
3304 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
3305 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 3
3308 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f32.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, iXLen 3, iXLen %vl)
3312 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i32>, iXLen, iXLen)
3314 define <vscale x 2 x float> @test_sf_vc_fv_fvvi_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
3315 ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m1:
3316 ; CHECK: # %bb.0: # %entry
3317 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
3318 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 3
3321 %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv2f32.nxv2f32.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, iXLen 3, iXLen %vl)
3322 ret <vscale x 2 x float> %0
3325 declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv2f32.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i32>, iXLen, iXLen)
3327 define void @test_sf_vc_fvvi_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
3328 ; CHECK-LABEL: test_sf_vc_fvvi_se_e32m2:
3329 ; CHECK: # %bb.0: # %entry
3330 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
3331 ; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 3
3334 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f32.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, iXLen 3, iXLen %vl)
3338 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i32>, iXLen, iXLen)
3340 define <vscale x 4 x float> @test_sf_vc_fv_fvvi_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
3341 ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m2:
3342 ; CHECK: # %bb.0: # %entry
3343 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
3344 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 3
3347 %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv4f32.nxv4f32.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, iXLen 3, iXLen %vl)
3348 ret <vscale x 4 x float> %0
3351 declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv4f32.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i32>, iXLen, iXLen)
3353 define void @test_sf_vc_fvvi_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
3354 ; CHECK-LABEL: test_sf_vc_fvvi_se_e32m4:
3355 ; CHECK: # %bb.0: # %entry
3356 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
3357 ; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 3
3360 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f32.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, iXLen 3, iXLen %vl)
3364 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i32>, iXLen, iXLen)
3366 define <vscale x 8 x float> @test_sf_vc_fv_fvvi_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
3367 ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m4:
3368 ; CHECK: # %bb.0: # %entry
3369 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
3370 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 3
3373 %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv8f32.nxv8f32.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, iXLen 3, iXLen %vl)
3374 ret <vscale x 8 x float> %0
3377 declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv8f32.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i32>, iXLen, iXLen)
3379 define void @test_sf_vc_fvvi_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, iXLen %vl) {
3380 ; CHECK-LABEL: test_sf_vc_fvvi_se_e32m8:
3381 ; CHECK: # %bb.0: # %entry
3382 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
3383 ; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 3
3386 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f32.nxv16i32.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, iXLen 3, iXLen %vl)
3390 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f32.nxv16i32.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i32>, iXLen, iXLen)
3392 define <vscale x 16 x float> @test_sf_vc_fv_fvvi_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, iXLen %vl) {
3393 ; CHECK-LABEL: test_sf_vc_fv_fvvi_se_e32m8:
3394 ; CHECK: # %bb.0: # %entry
3395 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
3396 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 3
3399 %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv16f32.nxv16f32.nxv16i32.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, iXLen 3, iXLen %vl)
3400 ret <vscale x 16 x float> %0
3403 declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv16f32.nxv16f32.nxv16i32.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i32>, iXLen, iXLen)
3405 define void @test_sf_vc_fvvf_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, half %rs1, iXLen %vl) {
3406 ; CHECK-LABEL: test_sf_vc_fvvf_se_e16mf4:
3407 ; CHECK: # %bb.0: # %entry
3408 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
3409 ; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
3412 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f16.nxv1i16.f16.iXLen(iXLen 1, <vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, half %rs1, iXLen %vl)
3416 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f16.nxv1i16.f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x i16>, half, iXLen)
3418 define <vscale x 1 x half> @test_sf_vc_fv_fvvf_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, half %rs1, iXLen %vl) {
3419 ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16mf4:
3420 ; CHECK: # %bb.0: # %entry
3421 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
3422 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
3425 %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv1f16.nxv1f16.nxv1i16.f16.iXLen(iXLen 1, <vscale x 1 x half> %vd, <vscale x 1 x i16> %vs2, half %rs1, iXLen %vl)
3426 ret <vscale x 1 x half> %0
3429 declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv1f16.nxv1f16.nxv1i16.f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x i16>, half, iXLen)
3431 define void @test_sf_vc_fvvf_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, half %rs1, iXLen %vl) {
3432 ; CHECK-LABEL: test_sf_vc_fvvf_se_e16mf2:
3433 ; CHECK: # %bb.0: # %entry
3434 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
3435 ; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
3438 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f16.nxv2i16.f16.iXLen(iXLen 1, <vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, half %rs1, iXLen %vl)
3442 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f16.nxv2i16.f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x i16>, half, iXLen)
3444 define <vscale x 2 x half> @test_sf_vc_fv_fvvf_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, half %rs1, iXLen %vl) {
3445 ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16mf2:
3446 ; CHECK: # %bb.0: # %entry
3447 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
3448 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
3451 %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv2f16.nxv2f16.nxv2i16.f16.iXLen(iXLen 1, <vscale x 2 x half> %vd, <vscale x 2 x i16> %vs2, half %rs1, iXLen %vl)
3452 ret <vscale x 2 x half> %0
3455 declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv2f16.nxv2f16.nxv2i16.f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x i16>, half, iXLen)
3457 define void @test_sf_vc_fvvf_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, half %rs1, iXLen %vl) {
3458 ; CHECK-LABEL: test_sf_vc_fvvf_se_e16m1:
3459 ; CHECK: # %bb.0: # %entry
3460 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
3461 ; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
3464 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f16.nxv4i16.f16.iXLen(iXLen 1, <vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, half %rs1, iXLen %vl)
3468 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f16.nxv4i16.f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x i16>, half, iXLen)
3470 define <vscale x 4 x half> @test_sf_vc_fv_fvvf_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, half %rs1, iXLen %vl) {
3471 ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m1:
3472 ; CHECK: # %bb.0: # %entry
3473 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
3474 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
3477 %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.nxv4f16.nxv4i16.f16.iXLen(iXLen 1, <vscale x 4 x half> %vd, <vscale x 4 x i16> %vs2, half %rs1, iXLen %vl)
3478 ret <vscale x 4 x half> %0
3481 declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.nxv4f16.nxv4i16.f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x i16>, half, iXLen)
3483 define void @test_sf_vc_fvvf_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, half %rs1, iXLen %vl) {
3484 ; CHECK-LABEL: test_sf_vc_fvvf_se_e16m2:
3485 ; CHECK: # %bb.0: # %entry
3486 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
3487 ; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0
3490 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f16.nxv8i16.f16.iXLen(iXLen 1, <vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, half %rs1, iXLen %vl)
3494 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f16.nxv8i16.f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x i16>, half, iXLen)
3496 define <vscale x 8 x half> @test_sf_vc_fv_fvvf_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, half %rs1, iXLen %vl) {
3497 ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m2:
3498 ; CHECK: # %bb.0: # %entry
3499 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
3500 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
3503 %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv8f16.nxv8f16.nxv8i16.f16.iXLen(iXLen 1, <vscale x 8 x half> %vd, <vscale x 8 x i16> %vs2, half %rs1, iXLen %vl)
3504 ret <vscale x 8 x half> %0
3507 declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv8f16.nxv8f16.nxv8i16.f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x i16>, half, iXLen)
3509 define void @test_sf_vc_fvvf_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, half %rs1, iXLen %vl) {
3510 ; CHECK-LABEL: test_sf_vc_fvvf_se_e16m4:
3511 ; CHECK: # %bb.0: # %entry
3512 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
3513 ; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0
3516 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f16.nxv16i16.f16.iXLen(iXLen 1, <vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, half %rs1, iXLen %vl)
3520 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f16.nxv16i16.f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x i16>, half, iXLen)
3522 define <vscale x 16 x half> @test_sf_vc_fv_fvvf_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, half %rs1, iXLen %vl) {
3523 ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m4:
3524 ; CHECK: # %bb.0: # %entry
3525 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
3526 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
3529 %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv16f16.nxv16f16.nxv16i16.f16.iXLen(iXLen 1, <vscale x 16 x half> %vd, <vscale x 16 x i16> %vs2, half %rs1, iXLen %vl)
3530 ret <vscale x 16 x half> %0
3533 declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv16f16.nxv16f16.nxv16i16.f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x i16>, half, iXLen)
3535 define void @test_sf_vc_fvvf_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, half %rs1, iXLen %vl) {
3536 ; CHECK-LABEL: test_sf_vc_fvvf_se_e16m8:
3537 ; CHECK: # %bb.0: # %entry
3538 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
3539 ; CHECK-NEXT: sf.vc.fvv 1, v8, v16, fa0
3542 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32f16.nxv32i16.f16.iXLen(iXLen 1, <vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, half %rs1, iXLen %vl)
3546 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32f16.nxv32i16.f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x i16>, half, iXLen)
3548 define <vscale x 32 x half> @test_sf_vc_fv_fvvf_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, half %rs1, iXLen %vl) {
3549 ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e16m8:
3550 ; CHECK: # %bb.0: # %entry
3551 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
3552 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0
3555 %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv32f16.nxv32f16.nxv32i16.f16.iXLen(iXLen 1, <vscale x 32 x half> %vd, <vscale x 32 x i16> %vs2, half %rs1, iXLen %vl)
3556 ret <vscale x 32 x half> %0
3559 declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv32f16.nxv32f16.nxv32i16.f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x i16>, half, iXLen)
3561 define void @test_sf_vc_fvvf_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, float %rs1, iXLen %vl) {
3562 ; CHECK-LABEL: test_sf_vc_fvvf_se_e32mf2:
3563 ; CHECK: # %bb.0: # %entry
3564 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
3565 ; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
3568 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f32.nxv1i32.f32.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, float %rs1, iXLen %vl)
3572 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f32.nxv1i32.f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i32>, float, iXLen)
3574 define <vscale x 1 x float> @test_sf_vc_fv_fvvf_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, float %rs1, iXLen %vl) {
3575 ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32mf2:
3576 ; CHECK: # %bb.0: # %entry
3577 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
3578 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
3581 %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv1f32.nxv1f32.nxv1i32.f32.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x i32> %vs2, float %rs1, iXLen %vl)
3582 ret <vscale x 1 x float> %0
3585 declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv1f32.nxv1f32.nxv1i32.f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i32>, float, iXLen)
3587 define void @test_sf_vc_fvvf_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, float %rs1, iXLen %vl) {
3588 ; CHECK-LABEL: test_sf_vc_fvvf_se_e32m1:
3589 ; CHECK: # %bb.0: # %entry
3590 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
3591 ; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
3594 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f32.nxv2i32.f32.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, float %rs1, iXLen %vl)
3598 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f32.nxv2i32.f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i32>, float, iXLen)
3600 define <vscale x 2 x float> @test_sf_vc_fv_fvvf_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, float %rs1, iXLen %vl) {
3601 ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m1:
3602 ; CHECK: # %bb.0: # %entry
3603 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
3604 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
3607 %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv2f32.nxv2f32.nxv2i32.f32.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x i32> %vs2, float %rs1, iXLen %vl)
3608 ret <vscale x 2 x float> %0
3611 declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv2f32.nxv2f32.nxv2i32.f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i32>, float, iXLen)
3613 define void @test_sf_vc_fvvf_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, float %rs1, iXLen %vl) {
3614 ; CHECK-LABEL: test_sf_vc_fvvf_se_e32m2:
3615 ; CHECK: # %bb.0: # %entry
3616 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
3617 ; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0
3620 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f32.nxv4i32.f32.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, float %rs1, iXLen %vl)
3624 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f32.nxv4i32.f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i32>, float, iXLen)
3626 define <vscale x 4 x float> @test_sf_vc_fv_fvvf_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, float %rs1, iXLen %vl) {
3627 ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m2:
3628 ; CHECK: # %bb.0: # %entry
3629 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
3630 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
3633 %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.nxv4f32.nxv4i32.f32.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x i32> %vs2, float %rs1, iXLen %vl)
3634 ret <vscale x 4 x float> %0
3637 declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.nxv4f32.nxv4i32.f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i32>, float, iXLen)
3639 define void @test_sf_vc_fvvf_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, float %rs1, iXLen %vl) {
3640 ; CHECK-LABEL: test_sf_vc_fvvf_se_e32m4:
3641 ; CHECK: # %bb.0: # %entry
3642 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
3643 ; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0
3646 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f32.nxv8i32.f32.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, float %rs1, iXLen %vl)
3650 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f32.nxv8i32.f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i32>, float, iXLen)
3652 define <vscale x 8 x float> @test_sf_vc_fv_fvvf_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, float %rs1, iXLen %vl) {
3653 ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m4:
3654 ; CHECK: # %bb.0: # %entry
3655 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
3656 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
3659 %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv8f32.nxv8f32.nxv8i32.f32.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x i32> %vs2, float %rs1, iXLen %vl)
3660 ret <vscale x 8 x float> %0
3663 declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv8f32.nxv8f32.nxv8i32.f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i32>, float, iXLen)
3665 define void @test_sf_vc_fvvf_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, float %rs1, iXLen %vl) {
3666 ; CHECK-LABEL: test_sf_vc_fvvf_se_e32m8:
3667 ; CHECK: # %bb.0: # %entry
3668 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
3669 ; CHECK-NEXT: sf.vc.fvv 1, v8, v16, fa0
3672 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f32.nxv16i32.f32.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, float %rs1, iXLen %vl)
3676 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f32.nxv16i32.f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i32>, float, iXLen)
3678 define <vscale x 16 x float> @test_sf_vc_fv_fvvf_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, float %rs1, iXLen %vl) {
3679 ; CHECK-LABEL: test_sf_vc_fv_fvvf_se_e32m8:
3680 ; CHECK: # %bb.0: # %entry
3681 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
3682 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0
3685 %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv16f32.nxv16f32.nxv16i32.f32.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x i32> %vs2, float %rs1, iXLen %vl)
3686 ret <vscale x 16 x float> %0
3689 declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv16f32.nxv16f32.nxv16i32.f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i32>, float, iXLen)