; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+xsfvcp \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+xsfvcp \
; RUN:   -verify-machineinstrs | FileCheck %s
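
; These tests exercise the SiFive VCIX (XSfvcp) three-operand custom intrinsics:
; the side-effecting sf.vc.vvv/sf.vc.xvv forms and the value-returning
; sf.vc.v.vvv/sf.vc.v.xvv forms, across SEW e8-e64 and LMUL mf8-m8.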
define void @test_sf_vc_vvv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e8mf8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)

define void @test_sf_vc_vvv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e8mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)

define void @test_sf_vc_vvv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e8mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)

define void @test_sf_vc_vvv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e8m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)

define void @test_sf_vc_vvv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e8m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)

define void @test_sf_vc_vvv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e8m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)

define void @test_sf_vc_vvv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e8m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)

define void @test_sf_vc_vvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e16mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)

define void @test_sf_vc_vvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e16mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)

define void @test_sf_vc_vvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e16m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)

define void @test_sf_vc_vvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e16m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)

define void @test_sf_vc_vvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e16m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)

define void @test_sf_vc_vvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e16m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)

define void @test_sf_vc_vvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e32mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)

define void @test_sf_vc_vvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e32m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)

define void @test_sf_vc_vvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e32m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)

define void @test_sf_vc_vvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e32m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)

define void @test_sf_vc_vvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e32m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)

define void @test_sf_vc_vvv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e64m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)

define void @test_sf_vc_vvv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e64m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)

define void @test_sf_vc_vvv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e64m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)

define void @test_sf_vc_vvv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvv_se_e64m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)

define <vscale x 1 x i8> @test_sf_vc_v_vvv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
  ret <vscale x 1 x i8> %0
}

declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)

define <vscale x 2 x i8> @test_sf_vc_v_vvv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
  ret <vscale x 2 x i8> %0
}

declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)

define <vscale x 4 x i8> @test_sf_vc_v_vvv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e8mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
  ret <vscale x 4 x i8> %0
}

declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)

define <vscale x 8 x i8> @test_sf_vc_v_vvv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
  ret <vscale x 8 x i8> %0
}

declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)

define <vscale x 16 x i8> @test_sf_vc_v_vvv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
  ret <vscale x 16 x i8> %0
}

declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)

define <vscale x 32 x i8> @test_sf_vc_v_vvv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
  ret <vscale x 32 x i8> %0
}

declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)

define <vscale x 64 x i8> @test_sf_vc_v_vvv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e8m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
  ret <vscale x 64 x i8> %0
}

declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)

define <vscale x 1 x i16> @test_sf_vc_v_vvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e16mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
  ret <vscale x 1 x i16> %0
}

declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)

define <vscale x 2 x i16> @test_sf_vc_v_vvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e16mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
  ret <vscale x 2 x i16> %0
}

declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)

define <vscale x 4 x i16> @test_sf_vc_v_vvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
  ret <vscale x 4 x i16> %0
}

declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)

define <vscale x 8 x i16> @test_sf_vc_v_vvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
  ret <vscale x 8 x i16> %0
}

declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)

define <vscale x 16 x i16> @test_sf_vc_v_vvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
  ret <vscale x 16 x i16> %0
}

declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)

define <vscale x 32 x i16> @test_sf_vc_v_vvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e16m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
  ret <vscale x 32 x i16> %0
}

declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)

define <vscale x 1 x i32> @test_sf_vc_v_vvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e32mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
  ret <vscale x 1 x i32> %0
}

declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)

define <vscale x 2 x i32> @test_sf_vc_v_vvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
  ret <vscale x 2 x i32> %0
}

declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)

define <vscale x 4 x i32> @test_sf_vc_v_vvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
  ret <vscale x 4 x i32> %0
}

declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)

define <vscale x 8 x i32> @test_sf_vc_v_vvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
  ret <vscale x 8 x i32> %0
}

declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)

define <vscale x 16 x i32> @test_sf_vc_v_vvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e32m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
  ret <vscale x 16 x i32> %0
}

declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)

define <vscale x 1 x i64> @test_sf_vc_v_vvv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
  ret <vscale x 1 x i64> %0
}

declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)

define <vscale x 2 x i64> @test_sf_vc_v_vvv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
  ret <vscale x 2 x i64> %0
}

declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)

define <vscale x 4 x i64> @test_sf_vc_v_vvv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
  ret <vscale x 4 x i64> %0
}

declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)

define <vscale x 8 x i64> @test_sf_vc_v_vvv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_se_e64m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
  ret <vscale x 8 x i64> %0
}

declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)

define <vscale x 1 x i8> @test_sf_vc_v_vvv_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e8mf8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
  ret <vscale x 1 x i8> %0
}

declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vvv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)

define <vscale x 2 x i8> @test_sf_vc_v_vvv_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e8mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
  ret <vscale x 2 x i8> %0
}

declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vvv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)

define <vscale x 4 x i8> @test_sf_vc_v_vvv_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e8mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
  ret <vscale x 4 x i8> %0
}

declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vvv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)

define <vscale x 8 x i8> @test_sf_vc_v_vvv_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e8m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
  ret <vscale x 8 x i8> %0
}

declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vvv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)

define <vscale x 16 x i8> @test_sf_vc_v_vvv_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e8m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
  ret <vscale x 16 x i8> %0
}

declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vvv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)

define <vscale x 32 x i8> @test_sf_vc_v_vvv_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e8m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
  ret <vscale x 32 x i8> %0
}

declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vvv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)

define <vscale x 64 x i8> @test_sf_vc_v_vvv_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e8m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8r.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
  ret <vscale x 64 x i8> %0
}

declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vvv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)

define <vscale x 1 x i16> @test_sf_vc_v_vvv_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e16mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
  ret <vscale x 1 x i16> %0
}

declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)

define <vscale x 2 x i16> @test_sf_vc_v_vvv_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e16mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
  ret <vscale x 2 x i16> %0
}

declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)

define <vscale x 4 x i16> @test_sf_vc_v_vvv_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e16m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
  ret <vscale x 4 x i16> %0
}

declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)

define <vscale x 8 x i16> @test_sf_vc_v_vvv_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e16m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
  ret <vscale x 8 x i16> %0
}

declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)

define <vscale x 16 x i16> @test_sf_vc_v_vvv_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e16m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
  ret <vscale x 16 x i16> %0
}

declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)

define <vscale x 32 x i16> @test_sf_vc_v_vvv_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e16m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re16.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
  ret <vscale x 32 x i16> %0
}

declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)

define <vscale x 1 x i32> @test_sf_vc_v_vvv_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e32mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
  ret <vscale x 1 x i32> %0
}

declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)

define <vscale x 2 x i32> @test_sf_vc_v_vvv_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e32m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
  ret <vscale x 2 x i32> %0
}

declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)

define <vscale x 4 x i32> @test_sf_vc_v_vvv_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e32m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
  ret <vscale x 4 x i32> %0
}

declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)

define <vscale x 8 x i32> @test_sf_vc_v_vvv_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e32m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
  ret <vscale x 8 x i32> %0
}

declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)

define <vscale x 16 x i32> @test_sf_vc_v_vvv_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e32m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re32.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
  ret <vscale x 16 x i32> %0
}

declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)

define <vscale x 1 x i64> @test_sf_vc_v_vvv_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e64m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
  ret <vscale x 1 x i64> %0
}

declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)

define <vscale x 2 x i64> @test_sf_vc_v_vvv_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e64m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
  ret <vscale x 2 x i64> %0
}

declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)

define <vscale x 4 x i64> @test_sf_vc_v_vvv_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e64m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
  ret <vscale x 4 x i64> %0
}

declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)

define <vscale x 8 x i64> @test_sf_vc_v_vvv_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvv_e64m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vl8re64.v v24, (a0)
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
  ret <vscale x 8 x i64> %0
}

declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)

define void @test_sf_vc_xvv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvv_se_e8mf8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i8.i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i8.i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, i8, iXLen)

define void @test_sf_vc_xvv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvv_se_e8mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i8.i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i8.i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, i8, iXLen)

define void @test_sf_vc_xvv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvv_se_e8mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i8.i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i8.i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, i8, iXLen)

define void @test_sf_vc_xvv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvv_se_e8m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i8.i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i8.i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, i8, iXLen)

define void @test_sf_vc_xvv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvv_se_e8m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i8.i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i8.i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, i8, iXLen)

define void @test_sf_vc_xvv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvv_se_e8m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i8.i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i8.i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, i8, iXLen)

define void @test_sf_vc_xvv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvv_se_e8m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: sf.vc.xvv 3, v8, v16, a0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv64i8.i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv64i8.i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, i8, iXLen)

define void @test_sf_vc_xvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvv_se_e16mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i16.i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1i16.i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, i16, iXLen)

define void @test_sf_vc_xvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvv_se_e16mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i16.i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2i16.i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, i16, iXLen)

define void @test_sf_vc_xvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvv_se_e16m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i16.i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4i16.i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, i16, iXLen)

define void @test_sf_vc_xvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvv_se_e16m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i16.i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8i16.i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, i16, iXLen)

define void @test_sf_vc_xvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvv_se_e16m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i16.i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16i16.i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, i16, iXLen)

define void @test_sf_vc_xvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvv_se_e16m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: sf.vc.xvv 3, v8, v16, a0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i16.i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32i16.i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, i16, iXLen)

define void @test_sf_vc_xvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvv_se_e32mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, i32, iXLen)

define void @test_sf_vc_xvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvv_se_e32m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, i32, iXLen)

define void @test_sf_vc_xvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvv_se_e32m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, i32, iXLen)

define void @test_sf_vc_xvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvv_se_e32m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, i32, iXLen)

define void @test_sf_vc_xvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvv_se_e32m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: sf.vc.xvv 3, v8, v16, a0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i32.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvv.se.i32.nxv16i32.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, i32, iXLen)

define <vscale x 1 x i8> @test_sf_vc_v_xvv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.iXLen.i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret <vscale x 1 x i8> %0
}

declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv1i8.iXLen.i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, i8, iXLen)

define <vscale x 2 x i8> @test_sf_vc_v_xvv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.iXLen.i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret <vscale x 2 x i8> %0
}

declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv2i8.iXLen.i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, i8, iXLen)

define <vscale x 4 x i8> @test_sf_vc_v_xvv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_xvv_se_e8mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.iXLen.i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret <vscale x 4 x i8> %0
}

declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv4i8.iXLen.i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, i8, iXLen)

define <vscale x 8 x i8> @test_sf_vc_v_xvv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.iXLen.i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret <vscale x 8 x i8> %0
}

declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv8i8.iXLen.i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, i8, iXLen)

define <vscale x 16 x i8> @test_sf_vc_v_xvv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.iXLen.i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret <vscale x 16 x i8> %0
}

declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv16i8.iXLen.i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, i8, iXLen)

define <vscale x 32 x i8> @test_sf_vc_v_xvv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.iXLen.i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret <vscale x 32 x i8> %0
}

declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv32i8.iXLen.i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, i8, iXLen)

define <vscale x 64 x i8> @test_sf_vc_v_xvv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_xvv_se_e8m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.iXLen.i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret <vscale x 64 x i8> %0
}

declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.se.nxv64i8.iXLen.i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, i8, iXLen)

1202 define <vscale x 1 x i16> @test_sf_vc_v_xvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1203 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16mf4:
1204 ; CHECK: # %bb.0: # %entry
1205 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1206 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1209 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.iXLen.i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
1210 ret <vscale x 1 x i16> %0
1213 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv1i16.iXLen.i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, i16, iXLen)
1215 define <vscale x 2 x i16> @test_sf_vc_v_xvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1216 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16mf2:
1217 ; CHECK: # %bb.0: # %entry
1218 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
1219 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1222 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.iXLen.i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
1223 ret <vscale x 2 x i16> %0
1226 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv2i16.iXLen.i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, i16, iXLen)
1228 define <vscale x 4 x i16> @test_sf_vc_v_xvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1229 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m1:
1230 ; CHECK: # %bb.0: # %entry
1231 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1232 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1235 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.iXLen.i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
1236 ret <vscale x 4 x i16> %0
1239 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv4i16.iXLen.i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, i16, iXLen)
1241 define <vscale x 8 x i16> @test_sf_vc_v_xvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1242 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m2:
1243 ; CHECK: # %bb.0: # %entry
1244 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1245 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
1248 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.iXLen.i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
1249 ret <vscale x 8 x i16> %0
1252 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv8i16.iXLen.i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, i16, iXLen)
1254 define <vscale x 16 x i16> @test_sf_vc_v_xvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1255 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m4:
1256 ; CHECK: # %bb.0: # %entry
1257 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
1258 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
1261 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.iXLen.i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
1262 ret <vscale x 16 x i16> %0
1265 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv16i16.iXLen.i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, i16, iXLen)
1267 define <vscale x 32 x i16> @test_sf_vc_v_xvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1268 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e16m8:
1269 ; CHECK: # %bb.0: # %entry
1270 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
1271 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
1274 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.iXLen.i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
1275 ret <vscale x 32 x i16> %0
1278 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.se.nxv32i16.iXLen.i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, i16, iXLen)
1280 define <vscale x 1 x i32> @test_sf_vc_v_xvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1281 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32mf2:
1282 ; CHECK: # %bb.0: # %entry
1283 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1284 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1287 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.iXLen.i32.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
1288 ret <vscale x 1 x i32> %0
1291 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv1i32.iXLen.i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, i32, iXLen)
1293 define <vscale x 2 x i32> @test_sf_vc_v_xvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1294 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m1:
1295 ; CHECK: # %bb.0: # %entry
1296 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1297 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1300 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.iXLen.i32.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
1301 ret <vscale x 2 x i32> %0
1304 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv2i32.iXLen.i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, i32, iXLen)
1306 define <vscale x 4 x i32> @test_sf_vc_v_xvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1307 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m2:
1308 ; CHECK: # %bb.0: # %entry
1309 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1310 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
1313 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.iXLen.i32.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
1314 ret <vscale x 4 x i32> %0
1317 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv4i32.iXLen.i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, i32, iXLen)
1319 define <vscale x 8 x i32> @test_sf_vc_v_xvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1320 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m4:
1321 ; CHECK: # %bb.0: # %entry
1322 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1323 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
1326 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.iXLen.i32.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
1327 ret <vscale x 8 x i32> %0
1330 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv8i32.iXLen.i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, i32, iXLen)
1332 define <vscale x 16 x i32> @test_sf_vc_v_xvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1333 ; CHECK-LABEL: test_sf_vc_v_xvv_se_e32m8:
1334 ; CHECK: # %bb.0: # %entry
1335 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1336 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
1339 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.iXLen.i32.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
1340 ret <vscale x 16 x i32> %0
1343 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.se.nxv16i32.iXLen.i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, i32, iXLen)
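; sf.vc.v.xvv intrinsics without the .se suffix; these are expected to select
; the same vsetvli/sf.vc.v.xvv sequences as the .se variants above.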
1345 define <vscale x 1 x i8> @test_sf_vc_v_xvv_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1346 ; CHECK-LABEL: test_sf_vc_v_xvv_e8mf8:
1347 ; CHECK: # %bb.0: # %entry
1348 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1349 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1352 %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.nxv1i8.iXLen.i8.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
1353 ret <vscale x 1 x i8> %0
1356 declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xvv.nxv1i8.iXLen.i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, i8, iXLen)
1358 define <vscale x 2 x i8> @test_sf_vc_v_xvv_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1359 ; CHECK-LABEL: test_sf_vc_v_xvv_e8mf4:
1360 ; CHECK: # %bb.0: # %entry
1361 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1362 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1365 %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.nxv2i8.iXLen.i8.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
1366 ret <vscale x 2 x i8> %0
1369 declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xvv.nxv2i8.iXLen.i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, i8, iXLen)
1371 define <vscale x 4 x i8> @test_sf_vc_v_xvv_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1372 ; CHECK-LABEL: test_sf_vc_v_xvv_e8mf2:
1373 ; CHECK: # %bb.0: # %entry
1374 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1375 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1378 %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.nxv4i8.iXLen.i8.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
1379 ret <vscale x 4 x i8> %0
1382 declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xvv.nxv4i8.iXLen.i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, i8, iXLen)
1384 define <vscale x 8 x i8> @test_sf_vc_v_xvv_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1385 ; CHECK-LABEL: test_sf_vc_v_xvv_e8m1:
1386 ; CHECK: # %bb.0: # %entry
1387 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1388 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1391 %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.nxv8i8.iXLen.i8.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
1392 ret <vscale x 8 x i8> %0
1395 declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xvv.nxv8i8.iXLen.i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, i8, iXLen)
1397 define <vscale x 16 x i8> @test_sf_vc_v_xvv_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1398 ; CHECK-LABEL: test_sf_vc_v_xvv_e8m2:
1399 ; CHECK: # %bb.0: # %entry
1400 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1401 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
1404 %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.nxv16i8.iXLen.i8.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
1405 ret <vscale x 16 x i8> %0
1408 declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xvv.nxv16i8.iXLen.i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, i8, iXLen)
1410 define <vscale x 32 x i8> @test_sf_vc_v_xvv_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1411 ; CHECK-LABEL: test_sf_vc_v_xvv_e8m4:
1412 ; CHECK: # %bb.0: # %entry
1413 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
1414 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
1417 %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.nxv32i8.iXLen.i8.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
1418 ret <vscale x 32 x i8> %0
1421 declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xvv.nxv32i8.iXLen.i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, i8, iXLen)
1423 define <vscale x 64 x i8> @test_sf_vc_v_xvv_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1424 ; CHECK-LABEL: test_sf_vc_v_xvv_e8m8:
1425 ; CHECK: # %bb.0: # %entry
1426 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
1427 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
1430 %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.nxv64i8.iXLen.i8.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
1431 ret <vscale x 64 x i8> %0
1434 declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xvv.nxv64i8.iXLen.i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, i8, iXLen)
1436 define <vscale x 1 x i16> @test_sf_vc_v_xvv_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1437 ; CHECK-LABEL: test_sf_vc_v_xvv_e16mf4:
1438 ; CHECK: # %bb.0: # %entry
1439 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1440 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1443 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.nxv1i16.iXLen.i16.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
1444 ret <vscale x 1 x i16> %0
1447 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvv.nxv1i16.iXLen.i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, i16, iXLen)
1449 define <vscale x 2 x i16> @test_sf_vc_v_xvv_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1450 ; CHECK-LABEL: test_sf_vc_v_xvv_e16mf2:
1451 ; CHECK: # %bb.0: # %entry
1452 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
1453 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1456 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.nxv2i16.iXLen.i16.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
1457 ret <vscale x 2 x i16> %0
1460 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvv.nxv2i16.iXLen.i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, i16, iXLen)
1462 define <vscale x 4 x i16> @test_sf_vc_v_xvv_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1463 ; CHECK-LABEL: test_sf_vc_v_xvv_e16m1:
1464 ; CHECK: # %bb.0: # %entry
1465 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1466 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1469 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.nxv4i16.iXLen.i16.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
1470 ret <vscale x 4 x i16> %0
1473 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvv.nxv4i16.iXLen.i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, i16, iXLen)
1475 define <vscale x 8 x i16> @test_sf_vc_v_xvv_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1476 ; CHECK-LABEL: test_sf_vc_v_xvv_e16m2:
1477 ; CHECK: # %bb.0: # %entry
1478 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1479 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
1482 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.nxv8i16.iXLen.i16.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
1483 ret <vscale x 8 x i16> %0
1486 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvv.nxv8i16.iXLen.i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, i16, iXLen)
1488 define <vscale x 16 x i16> @test_sf_vc_v_xvv_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1489 ; CHECK-LABEL: test_sf_vc_v_xvv_e16m4:
1490 ; CHECK: # %bb.0: # %entry
1491 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
1492 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
1495 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.nxv16i16.iXLen.i16.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
1496 ret <vscale x 16 x i16> %0
1499 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvv.nxv16i16.iXLen.i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, i16, iXLen)
1501 define <vscale x 32 x i16> @test_sf_vc_v_xvv_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1502 ; CHECK-LABEL: test_sf_vc_v_xvv_e16m8:
1503 ; CHECK: # %bb.0: # %entry
1504 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
1505 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
1508 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.nxv32i16.iXLen.i16.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
1509 ret <vscale x 32 x i16> %0
1512 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvv.nxv32i16.iXLen.i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, i16, iXLen)
1514 define <vscale x 1 x i32> @test_sf_vc_v_xvv_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1515 ; CHECK-LABEL: test_sf_vc_v_xvv_e32mf2:
1516 ; CHECK: # %bb.0: # %entry
1517 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1518 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1521 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.nxv1i32.iXLen.i32.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
1522 ret <vscale x 1 x i32> %0
1525 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvv.nxv1i32.iXLen.i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, i32, iXLen)
1527 define <vscale x 2 x i32> @test_sf_vc_v_xvv_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1528 ; CHECK-LABEL: test_sf_vc_v_xvv_e32m1:
1529 ; CHECK: # %bb.0: # %entry
1530 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1531 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
1534 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.nxv2i32.iXLen.i32.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
1535 ret <vscale x 2 x i32> %0
1538 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvv.nxv2i32.iXLen.i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, i32, iXLen)
1540 define <vscale x 4 x i32> @test_sf_vc_v_xvv_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1541 ; CHECK-LABEL: test_sf_vc_v_xvv_e32m2:
1542 ; CHECK: # %bb.0: # %entry
1543 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1544 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
1547 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.nxv4i32.iXLen.i32.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
1548 ret <vscale x 4 x i32> %0
1551 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvv.nxv4i32.iXLen.i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, i32, iXLen)
1553 define <vscale x 8 x i32> @test_sf_vc_v_xvv_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1554 ; CHECK-LABEL: test_sf_vc_v_xvv_e32m4:
1555 ; CHECK: # %bb.0: # %entry
1556 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1557 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
1560 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.nxv8i32.iXLen.i32.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
1561 ret <vscale x 8 x i32> %0
1564 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvv.nxv8i32.iXLen.i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, i32, iXLen)
1566 define <vscale x 16 x i32> @test_sf_vc_v_xvv_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1567 ; CHECK-LABEL: test_sf_vc_v_xvv_e32m8:
1568 ; CHECK: # %bb.0: # %entry
1569 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1570 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
1573 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.nxv16i32.iXLen.i32.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
1574 ret <vscale x 16 x i32> %0
1577 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvv.nxv16i32.iXLen.i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, i32, iXLen)
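; sf.vc.ivv.se intrinsics: the GPR rs1 operand is replaced by an immediate
; (10 in these tests), which appears directly in the emitted sf.vc.ivv
; instruction.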
1579 define void @test_sf_vc_ivv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
1580 ; CHECK-LABEL: test_sf_vc_ivv_se_e8mf8:
1581 ; CHECK: # %bb.0: # %entry
1582 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
1583 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
1586 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
1590 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, iXLen)
1592 define void @test_sf_vc_ivv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
1593 ; CHECK-LABEL: test_sf_vc_ivv_se_e8mf4:
1594 ; CHECK: # %bb.0: # %entry
1595 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
1596 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
1599 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
1603 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen, iXLen)
1605 define void @test_sf_vc_ivv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
1606 ; CHECK-LABEL: test_sf_vc_ivv_se_e8mf2:
1607 ; CHECK: # %bb.0: # %entry
1608 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
1609 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
1612 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
1616 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen, iXLen)
1618 define void @test_sf_vc_ivv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
1619 ; CHECK-LABEL: test_sf_vc_ivv_se_e8m1:
1620 ; CHECK: # %bb.0: # %entry
1621 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
1622 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
1625 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
1629 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen, iXLen)
1631 define void @test_sf_vc_ivv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
1632 ; CHECK-LABEL: test_sf_vc_ivv_se_e8m2:
1633 ; CHECK: # %bb.0: # %entry
1634 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
1635 ; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10
1638 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
1642 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen, iXLen)
1644 define void @test_sf_vc_ivv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
1645 ; CHECK-LABEL: test_sf_vc_ivv_se_e8m4:
1646 ; CHECK: # %bb.0: # %entry
1647 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
1648 ; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10
1651 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
1655 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen, iXLen)
1657 define void @test_sf_vc_ivv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen %vl) {
1658 ; CHECK-LABEL: test_sf_vc_ivv_se_e8m8:
1659 ; CHECK: # %bb.0: # %entry
1660 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
1661 ; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 10
1664 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen 10, iXLen %vl)
1668 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen)
1670 define void @test_sf_vc_ivv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
1671 ; CHECK-LABEL: test_sf_vc_ivv_se_e16mf4:
1672 ; CHECK: # %bb.0: # %entry
1673 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
1674 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
1677 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
1681 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen, iXLen)
1683 define void @test_sf_vc_ivv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
1684 ; CHECK-LABEL: test_sf_vc_ivv_se_e16mf2:
1685 ; CHECK: # %bb.0: # %entry
1686 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
1687 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
1690 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
1694 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen, iXLen)
1696 define void @test_sf_vc_ivv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
1697 ; CHECK-LABEL: test_sf_vc_ivv_se_e16m1:
1698 ; CHECK: # %bb.0: # %entry
1699 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
1700 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
1703 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
1707 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen, iXLen)
1709 define void @test_sf_vc_ivv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
1710 ; CHECK-LABEL: test_sf_vc_ivv_se_e16m2:
1711 ; CHECK: # %bb.0: # %entry
1712 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
1713 ; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10
1716 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
1720 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen, iXLen)
1722 define void @test_sf_vc_ivv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
1723 ; CHECK-LABEL: test_sf_vc_ivv_se_e16m4:
1724 ; CHECK: # %bb.0: # %entry
1725 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
1726 ; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10
1729 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
1733 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen, iXLen)
1735 define void @test_sf_vc_ivv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen %vl) {
1736 ; CHECK-LABEL: test_sf_vc_ivv_se_e16m8:
1737 ; CHECK: # %bb.0: # %entry
1738 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
1739 ; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 10
1742 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen 10, iXLen %vl)
1746 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen, iXLen)
1748 define void @test_sf_vc_ivv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
1749 ; CHECK-LABEL: test_sf_vc_ivv_se_e32mf2:
1750 ; CHECK: # %bb.0: # %entry
1751 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
1752 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
1755 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
1759 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen, iXLen)
1761 define void @test_sf_vc_ivv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
1762 ; CHECK-LABEL: test_sf_vc_ivv_se_e32m1:
1763 ; CHECK: # %bb.0: # %entry
1764 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
1765 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
1768 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
1772 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen, iXLen)
1774 define void @test_sf_vc_ivv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
1775 ; CHECK-LABEL: test_sf_vc_ivv_se_e32m2:
1776 ; CHECK: # %bb.0: # %entry
1777 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
1778 ; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10
1781 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
1785 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen, iXLen)
1787 define void @test_sf_vc_ivv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
1788 ; CHECK-LABEL: test_sf_vc_ivv_se_e32m4:
1789 ; CHECK: # %bb.0: # %entry
1790 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
1791 ; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10
1794 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
1798 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen, iXLen)
1800 define void @test_sf_vc_ivv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen %vl) {
1801 ; CHECK-LABEL: test_sf_vc_ivv_se_e32m8:
1802 ; CHECK: # %bb.0: # %entry
1803 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
1804 ; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 10
1807 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen 10, iXLen %vl)
1811 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen, iXLen)
1813 define void @test_sf_vc_ivv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen %vl) {
1814 ; CHECK-LABEL: test_sf_vc_ivv_se_e64m1:
1815 ; CHECK: # %bb.0: # %entry
1816 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1817 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
1820 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen 10, iXLen %vl)
1824 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen)
1826 define void @test_sf_vc_ivv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen %vl) {
1827 ; CHECK-LABEL: test_sf_vc_ivv_se_e64m2:
1828 ; CHECK: # %bb.0: # %entry
1829 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1830 ; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10
1833 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen 10, iXLen %vl)
1837 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen, iXLen)
1839 define void @test_sf_vc_ivv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen %vl) {
1840 ; CHECK-LABEL: test_sf_vc_ivv_se_e64m4:
1841 ; CHECK: # %bb.0: # %entry
1842 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1843 ; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10
1846 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen 10, iXLen %vl)
1850 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen, iXLen)
1852 define void @test_sf_vc_ivv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen %vl) {
1853 ; CHECK-LABEL: test_sf_vc_ivv_se_e64m8:
1854 ; CHECK: # %bb.0: # %entry
1855 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1856 ; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 10
1859 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen 10, iXLen %vl)
1863 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen, iXLen)
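; Value-returning sf.vc.v.ivv.se intrinsics with the same immediate operand.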
1865 define <vscale x 1 x i8> @test_sf_vc_v_ivv_se_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
1866 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf8:
1867 ; CHECK: # %bb.0: # %entry
1868 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
1869 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
1872 %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
1873 ret <vscale x 1 x i8> %0
1876 declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, iXLen)
1878 define <vscale x 2 x i8> @test_sf_vc_v_ivv_se_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
1879 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf4:
1880 ; CHECK: # %bb.0: # %entry
1881 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
1882 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
1885 %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
1886 ret <vscale x 2 x i8> %0
1889 declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen, iXLen)
1891 define <vscale x 4 x i8> @test_sf_vc_v_ivv_se_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
1892 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8mf2:
1893 ; CHECK: # %bb.0: # %entry
1894 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
1895 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
1898 %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
1899 ret <vscale x 4 x i8> %0
1902 declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen, iXLen)
1904 define <vscale x 8 x i8> @test_sf_vc_v_ivv_se_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
1905 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m1:
1906 ; CHECK: # %bb.0: # %entry
1907 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
1908 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
1911 %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
1912 ret <vscale x 8 x i8> %0
1915 declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen, iXLen)
1917 define <vscale x 16 x i8> @test_sf_vc_v_ivv_se_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
1918 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m2:
1919 ; CHECK: # %bb.0: # %entry
1920 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
1921 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
1924 %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
1925 ret <vscale x 16 x i8> %0
1928 declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen, iXLen)
1930 define <vscale x 32 x i8> @test_sf_vc_v_ivv_se_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
1931 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m4:
1932 ; CHECK: # %bb.0: # %entry
1933 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
1934 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
1937 %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
1938 ret <vscale x 32 x i8> %0
1941 declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen, iXLen)
1943 define <vscale x 64 x i8> @test_sf_vc_v_ivv_se_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen %vl) {
1944 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e8m8:
1945 ; CHECK: # %bb.0: # %entry
1946 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
1947 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
1950 %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen 10, iXLen %vl)
1951 ret <vscale x 64 x i8> %0
1954 declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen)
1956 define <vscale x 1 x i16> @test_sf_vc_v_ivv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
1957 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16mf4:
1958 ; CHECK: # %bb.0: # %entry
1959 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
1960 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
1963 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
1964 ret <vscale x 1 x i16> %0
1967 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen, iXLen)
1969 define <vscale x 2 x i16> @test_sf_vc_v_ivv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
1970 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16mf2:
1971 ; CHECK: # %bb.0: # %entry
1972 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
1973 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
1976 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
1977 ret <vscale x 2 x i16> %0
1980 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen, iXLen)
1982 define <vscale x 4 x i16> @test_sf_vc_v_ivv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
1983 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m1:
1984 ; CHECK: # %bb.0: # %entry
1985 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
1986 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
1989 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
1990 ret <vscale x 4 x i16> %0
1993 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen, iXLen)
1995 define <vscale x 8 x i16> @test_sf_vc_v_ivv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
1996 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m2:
1997 ; CHECK: # %bb.0: # %entry
1998 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
1999 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
2002 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
2003 ret <vscale x 8 x i16> %0
2006 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen, iXLen)
2008 define <vscale x 16 x i16> @test_sf_vc_v_ivv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
2009 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m4:
2010 ; CHECK: # %bb.0: # %entry
2011 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
2012 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
2015 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
2016 ret <vscale x 16 x i16> %0
2019 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen, iXLen)
2021 define <vscale x 32 x i16> @test_sf_vc_v_ivv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen %vl) {
2022 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e16m8:
2023 ; CHECK: # %bb.0: # %entry
2024 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
2025 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
2028 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen 10, iXLen %vl)
2029 ret <vscale x 32 x i16> %0
2032 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen, iXLen)
2034 define <vscale x 1 x i32> @test_sf_vc_v_ivv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
2035 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32mf2:
2036 ; CHECK: # %bb.0: # %entry
2037 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
2038 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2041 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
2042 ret <vscale x 1 x i32> %0
2045 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen, iXLen)
2047 define <vscale x 2 x i32> @test_sf_vc_v_ivv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
2048 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m1:
2049 ; CHECK: # %bb.0: # %entry
2050 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
2051 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2054 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
2055 ret <vscale x 2 x i32> %0
2058 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen, iXLen)
2060 define <vscale x 4 x i32> @test_sf_vc_v_ivv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
2061 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m2:
2062 ; CHECK: # %bb.0: # %entry
2063 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
2064 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
2067 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
2068 ret <vscale x 4 x i32> %0
2071 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen, iXLen)
2073 define <vscale x 8 x i32> @test_sf_vc_v_ivv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
2074 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m4:
2075 ; CHECK: # %bb.0: # %entry
2076 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
2077 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
2080 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
2081 ret <vscale x 8 x i32> %0
2084 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen, iXLen)
2086 define <vscale x 16 x i32> @test_sf_vc_v_ivv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen %vl) {
2087 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e32m8:
2088 ; CHECK: # %bb.0: # %entry
2089 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
2090 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
2093 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen 10, iXLen %vl)
2094 ret <vscale x 16 x i32> %0
2097 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen, iXLen)
2099 define <vscale x 1 x i64> @test_sf_vc_v_ivv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen %vl) {
2100 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m1:
2101 ; CHECK: # %bb.0: # %entry
2102 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
2103 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2106 %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen 10, iXLen %vl)
2107 ret <vscale x 1 x i64> %0
2110 declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen)
2112 define <vscale x 2 x i64> @test_sf_vc_v_ivv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen %vl) {
2113 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m2:
2114 ; CHECK: # %bb.0: # %entry
2115 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
2116 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
2119 %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen 10, iXLen %vl)
2120 ret <vscale x 2 x i64> %0
2123 declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen, iXLen)
2125 define <vscale x 4 x i64> @test_sf_vc_v_ivv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen %vl) {
2126 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m4:
2127 ; CHECK: # %bb.0: # %entry
2128 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
2129 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
2132 %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen 10, iXLen %vl)
2133 ret <vscale x 4 x i64> %0
2136 declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen, iXLen)
2138 define <vscale x 8 x i64> @test_sf_vc_v_ivv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen %vl) {
2139 ; CHECK-LABEL: test_sf_vc_v_ivv_se_e64m8:
2140 ; CHECK: # %bb.0: # %entry
2141 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
2142 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
2145 %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen 10, iXLen %vl)
2146 ret <vscale x 8 x i64> %0
2149 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen, iXLen)
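; sf.vc.v.ivv intrinsics without the .se suffix; codegen is expected to match
; the .se variants above.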
2151 define <vscale x 1 x i8> @test_sf_vc_v_ivv_e8mf8(<vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
2152 ; CHECK-LABEL: test_sf_vc_v_ivv_e8mf8:
2153 ; CHECK: # %bb.0: # %entry
2154 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
2155 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2158 %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i8> %vd, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
2159 ret <vscale x 1 x i8> %0
2162 declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.ivv.nxv1i8.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, iXLen)
2164 define <vscale x 2 x i8> @test_sf_vc_v_ivv_e8mf4(<vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
2165 ; CHECK-LABEL: test_sf_vc_v_ivv_e8mf4:
2166 ; CHECK: # %bb.0: # %entry
2167 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
2168 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2171 %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i8> %vd, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
2172 ret <vscale x 2 x i8> %0
2175 declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.ivv.nxv2i8.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen, iXLen)
2177 define <vscale x 4 x i8> @test_sf_vc_v_ivv_e8mf2(<vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
2178 ; CHECK-LABEL: test_sf_vc_v_ivv_e8mf2:
2179 ; CHECK: # %bb.0: # %entry
2180 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
2181 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2184 %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i8> %vd, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
2185 ret <vscale x 4 x i8> %0
2188 declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.ivv.nxv4i8.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen, iXLen)
2190 define <vscale x 8 x i8> @test_sf_vc_v_ivv_e8m1(<vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
2191 ; CHECK-LABEL: test_sf_vc_v_ivv_e8m1:
2192 ; CHECK: # %bb.0: # %entry
2193 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
2194 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2197 %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i8> %vd, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
2198 ret <vscale x 8 x i8> %0
2201 declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.ivv.nxv8i8.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen, iXLen)
2203 define <vscale x 16 x i8> @test_sf_vc_v_ivv_e8m2(<vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
2204 ; CHECK-LABEL: test_sf_vc_v_ivv_e8m2:
2205 ; CHECK: # %bb.0: # %entry
2206 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
2207 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
2210 %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i8> %vd, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
2211 ret <vscale x 16 x i8> %0
2214 declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.ivv.nxv16i8.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen, iXLen)
2216 define <vscale x 32 x i8> @test_sf_vc_v_ivv_e8m4(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
2217 ; CHECK-LABEL: test_sf_vc_v_ivv_e8m4:
2218 ; CHECK: # %bb.0: # %entry
2219 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
2220 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
2223 %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
2224 ret <vscale x 32 x i8> %0
2227 declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.ivv.nxv32i8.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen, iXLen)
2229 define <vscale x 64 x i8> @test_sf_vc_v_ivv_e8m8(<vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen %vl) {
2230 ; CHECK-LABEL: test_sf_vc_v_ivv_e8m8:
2231 ; CHECK: # %bb.0: # %entry
2232 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
2233 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
2236 %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 64 x i8> %vd, <vscale x 64 x i8> %vs2, iXLen 10, iXLen %vl)
2237 ret <vscale x 64 x i8> %0
2240 declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.ivv.nxv64i8.iXLen.iXLen.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen, iXLen)
2242 define <vscale x 1 x i16> @test_sf_vc_v_ivv_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
2243 ; CHECK-LABEL: test_sf_vc_v_ivv_e16mf4:
2244 ; CHECK: # %bb.0: # %entry
2245 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
2246 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2249 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
2250 ret <vscale x 1 x i16> %0
2253 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivv.nxv1i16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen, iXLen)
2255 define <vscale x 2 x i16> @test_sf_vc_v_ivv_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
2256 ; CHECK-LABEL: test_sf_vc_v_ivv_e16mf2:
2257 ; CHECK: # %bb.0: # %entry
2258 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
2259 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2262 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
2263 ret <vscale x 2 x i16> %0
2266 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivv.nxv2i16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen, iXLen)
2268 define <vscale x 4 x i16> @test_sf_vc_v_ivv_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
2269 ; CHECK-LABEL: test_sf_vc_v_ivv_e16m1:
2270 ; CHECK: # %bb.0: # %entry
2271 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
2272 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2275 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
2276 ret <vscale x 4 x i16> %0
2279 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivv.nxv4i16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen, iXLen)
2281 define <vscale x 8 x i16> @test_sf_vc_v_ivv_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
2282 ; CHECK-LABEL: test_sf_vc_v_ivv_e16m2:
2283 ; CHECK: # %bb.0: # %entry
2284 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
2285 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
2288 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
2289 ret <vscale x 8 x i16> %0
2292 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivv.nxv8i16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen, iXLen)
2294 define <vscale x 16 x i16> @test_sf_vc_v_ivv_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
2295 ; CHECK-LABEL: test_sf_vc_v_ivv_e16m4:
2296 ; CHECK: # %bb.0: # %entry
2297 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
2298 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
2301 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
2302 ret <vscale x 16 x i16> %0
2305 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivv.nxv16i16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen, iXLen)
2307 define <vscale x 32 x i16> @test_sf_vc_v_ivv_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen %vl) {
2308 ; CHECK-LABEL: test_sf_vc_v_ivv_e16m8:
2309 ; CHECK: # %bb.0: # %entry
2310 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
2311 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
2314 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, iXLen 10, iXLen %vl)
2315 ret <vscale x 32 x i16> %0
2318 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivv.nxv32i16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen, iXLen)
2320 define <vscale x 1 x i32> @test_sf_vc_v_ivv_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
2321 ; CHECK-LABEL: test_sf_vc_v_ivv_e32mf2:
2322 ; CHECK: # %bb.0: # %entry
2323 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
2324 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2327 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
2328 ret <vscale x 1 x i32> %0
2331 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivv.nxv1i32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen, iXLen)
2333 define <vscale x 2 x i32> @test_sf_vc_v_ivv_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
2334 ; CHECK-LABEL: test_sf_vc_v_ivv_e32m1:
2335 ; CHECK: # %bb.0: # %entry
2336 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
2337 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2340 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
2341 ret <vscale x 2 x i32> %0
2344 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivv.nxv2i32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen, iXLen)
2346 define <vscale x 4 x i32> @test_sf_vc_v_ivv_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
2347 ; CHECK-LABEL: test_sf_vc_v_ivv_e32m2:
2348 ; CHECK: # %bb.0: # %entry
2349 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
2350 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
2353 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
2354 ret <vscale x 4 x i32> %0
2357 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivv.nxv4i32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen, iXLen)
2359 define <vscale x 8 x i32> @test_sf_vc_v_ivv_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
2360 ; CHECK-LABEL: test_sf_vc_v_ivv_e32m4:
2361 ; CHECK: # %bb.0: # %entry
2362 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
2363 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
2366 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
2367 ret <vscale x 8 x i32> %0
2370 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivv.nxv8i32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen, iXLen)
2372 define <vscale x 16 x i32> @test_sf_vc_v_ivv_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen %vl) {
2373 ; CHECK-LABEL: test_sf_vc_v_ivv_e32m8:
2374 ; CHECK: # %bb.0: # %entry
2375 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
2376 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
2379 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, iXLen 10, iXLen %vl)
2380 ret <vscale x 16 x i32> %0
2383 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivv.nxv16i32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen, iXLen)
2385 define <vscale x 1 x i64> @test_sf_vc_v_ivv_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen %vl) {
2386 ; CHECK-LABEL: test_sf_vc_v_ivv_e64m1:
2387 ; CHECK: # %bb.0: # %entry
2388 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
2389 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
2392 %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, iXLen 10, iXLen %vl)
2393 ret <vscale x 1 x i64> %0
2396 declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivv.nxv1i64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen, iXLen)
2398 define <vscale x 2 x i64> @test_sf_vc_v_ivv_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen %vl) {
2399 ; CHECK-LABEL: test_sf_vc_v_ivv_e64m2:
2400 ; CHECK: # %bb.0: # %entry
2401 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
2402 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
2405 %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, iXLen 10, iXLen %vl)
2406 ret <vscale x 2 x i64> %0
2409 declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivv.nxv2i64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen, iXLen)
2411 define <vscale x 4 x i64> @test_sf_vc_v_ivv_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen %vl) {
2412 ; CHECK-LABEL: test_sf_vc_v_ivv_e64m4:
2413 ; CHECK: # %bb.0: # %entry
2414 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
2415 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
2418 %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, iXLen 10, iXLen %vl)
2419 ret <vscale x 4 x i64> %0
2422 declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivv.nxv4i64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen, iXLen)
2424 define <vscale x 8 x i64> @test_sf_vc_v_ivv_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen %vl) {
2425 ; CHECK-LABEL: test_sf_vc_v_ivv_e64m8:
2426 ; CHECK: # %bb.0: # %entry
2427 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
2428 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
2431 %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, iXLen 10, iXLen %vl)
2432 ret <vscale x 8 x i64> %0
2435 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivv.nxv8i64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen, iXLen)
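; sf.vc.fvv.se: void intrinsics whose vs1 operand is a scalar FP value (half,
; float, or double) passed in fa0, emitted as sf.vc.fvv.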
2437 define void @test_sf_vc_fvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
2438 ; CHECK-LABEL: test_sf_vc_fvv_se_e16mf4:
2439 ; CHECK: # %bb.0: # %entry
2440 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
2441 ; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
2444 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i16.f16.iXLen(iXLen 1, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
2448 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i16.f16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, half, iXLen)
2450 define void @test_sf_vc_fvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
2451 ; CHECK-LABEL: test_sf_vc_fvv_se_e16mf2:
2452 ; CHECK: # %bb.0: # %entry
2453 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
2454 ; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
2457 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i16.f16.iXLen(iXLen 1, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
2461 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i16.f16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, half, iXLen)
2463 define void @test_sf_vc_fvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
2464 ; CHECK-LABEL: test_sf_vc_fvv_se_e16m1:
2465 ; CHECK: # %bb.0: # %entry
2466 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
2467 ; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
2470 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i16.f16.iXLen(iXLen 1, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
2474 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i16.f16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, half, iXLen)
2476 define void @test_sf_vc_fvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
2477 ; CHECK-LABEL: test_sf_vc_fvv_se_e16m2:
2478 ; CHECK: # %bb.0: # %entry
2479 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
2480 ; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0
2483 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i16.f16.iXLen(iXLen 1, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
2487 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i16.f16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, half, iXLen)
2489 define void @test_sf_vc_fvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
2490 ; CHECK-LABEL: test_sf_vc_fvv_se_e16m4:
2491 ; CHECK: # %bb.0: # %entry
2492 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
2493 ; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0
2496 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i16.f16.iXLen(iXLen 1, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
2500 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i16.f16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, half, iXLen)
2502 define void @test_sf_vc_fvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
2503 ; CHECK-LABEL: test_sf_vc_fvv_se_e16m8:
2504 ; CHECK: # %bb.0: # %entry
2505 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
2506 ; CHECK-NEXT: sf.vc.fvv 1, v8, v16, fa0
2509 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32i16.f16.iXLen(iXLen 1, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
2513 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32i16.f16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, half, iXLen)
2515 define void @test_sf_vc_fvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
2516 ; CHECK-LABEL: test_sf_vc_fvv_se_e32mf2:
2517 ; CHECK: # %bb.0: # %entry
2518 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
2519 ; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
2522 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i32.f32.iXLen(iXLen 1, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
2526 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i32.f32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, float, iXLen)
2528 define void @test_sf_vc_fvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
2529 ; CHECK-LABEL: test_sf_vc_fvv_se_e32m1:
2530 ; CHECK: # %bb.0: # %entry
2531 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
2532 ; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
2535 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i32.f32.iXLen(iXLen 1, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
2539 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i32.f32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, float, iXLen)
2541 define void @test_sf_vc_fvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
2542 ; CHECK-LABEL: test_sf_vc_fvv_se_e32m2:
2543 ; CHECK: # %bb.0: # %entry
2544 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
2545 ; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0
2548 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i32.f32.iXLen(iXLen 1, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
2552 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i32.f32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, float, iXLen)
2554 define void @test_sf_vc_fvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
2555 ; CHECK-LABEL: test_sf_vc_fvv_se_e32m4:
2556 ; CHECK: # %bb.0: # %entry
2557 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
2558 ; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0
2561 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i32.f32.iXLen(iXLen 1, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
2565 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i32.f32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, float, iXLen)
2567 define void @test_sf_vc_fvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl) {
2568 ; CHECK-LABEL: test_sf_vc_fvv_se_e32m8:
2569 ; CHECK: # %bb.0: # %entry
2570 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
2571 ; CHECK-NEXT: sf.vc.fvv 1, v8, v16, fa0
2574 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i32.f32.iXLen(iXLen 1, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl)
2578 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16i32.f32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, float, iXLen)
2580 define void @test_sf_vc_fvv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl) {
2581 ; CHECK-LABEL: test_sf_vc_fvv_se_e64m1:
2582 ; CHECK: # %bb.0: # %entry
2583 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
2584 ; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
2587 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i64.f64.iXLen(iXLen 1, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl)
2591 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1i64.f64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, double, iXLen)
2593 define void @test_sf_vc_fvv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl) {
2594 ; CHECK-LABEL: test_sf_vc_fvv_se_e64m2:
2595 ; CHECK: # %bb.0: # %entry
2596 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
2597 ; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0
2600 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i64.f64.iXLen(iXLen 1, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl)
2604 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2i64.f64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, double, iXLen)
2606 define void @test_sf_vc_fvv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl) {
2607 ; CHECK-LABEL: test_sf_vc_fvv_se_e64m4:
2608 ; CHECK: # %bb.0: # %entry
2609 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
2610 ; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0
2613 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i64.f64.iXLen(iXLen 1, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl)
2617 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4i64.f64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, double, iXLen)
2619 define void @test_sf_vc_fvv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl) {
2620 ; CHECK-LABEL: test_sf_vc_fvv_se_e64m8:
2621 ; CHECK: # %bb.0: # %entry
2622 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
2623 ; CHECK-NEXT: sf.vc.fvv 1, v8, v16, fa0
2626 tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i64.f64.iXLen(iXLen 1, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl)
2630 declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8i64.f64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, double, iXLen)
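; sf.vc.v.fvv.se: the same scalar-FP form, but the intrinsic returns the
; updated destination vector.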
2632 define <vscale x 1 x i16> @test_sf_vc_v_fvv_se_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
2633 ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf4:
2634 ; CHECK: # %bb.0: # %entry
2635 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
2636 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
2639 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.iXLen.f16.iXLen(iXLen 1, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
2640 ret <vscale x 1 x i16> %0
2643 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv1i16.iXLen.f16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, half, iXLen)
2645 define <vscale x 2 x i16> @test_sf_vc_v_fvv_se_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
2646 ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16mf2:
2647 ; CHECK: # %bb.0: # %entry
2648 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
2649 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
2652 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.iXLen.f16.iXLen(iXLen 1, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
2653 ret <vscale x 2 x i16> %0
2656 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv2i16.iXLen.f16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, half, iXLen)
2658 define <vscale x 4 x i16> @test_sf_vc_v_fvv_se_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
2659 ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m1:
2660 ; CHECK: # %bb.0: # %entry
2661 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
2662 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
2665 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.iXLen.f16.iXLen(iXLen 1, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
2666 ret <vscale x 4 x i16> %0
2669 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv4i16.iXLen.f16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, half, iXLen)
2671 define <vscale x 8 x i16> @test_sf_vc_v_fvv_se_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
2672 ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m2:
2673 ; CHECK: # %bb.0: # %entry
2674 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
2675 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
2678 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.iXLen.f16.iXLen(iXLen 1, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
2679 ret <vscale x 8 x i16> %0
2682 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv8i16.iXLen.f16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, half, iXLen)
2684 define <vscale x 16 x i16> @test_sf_vc_v_fvv_se_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
2685 ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m4:
2686 ; CHECK: # %bb.0: # %entry
2687 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
2688 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
2691 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.iXLen.f16.iXLen(iXLen 1, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
2692 ret <vscale x 16 x i16> %0
2695 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv16i16.iXLen.f16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, half, iXLen)
2697 define <vscale x 32 x i16> @test_sf_vc_v_fvv_se_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
2698 ; CHECK-LABEL: test_sf_vc_v_fvv_se_e16m8:
2699 ; CHECK: # %bb.0: # %entry
2700 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
2701 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0
2704 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.iXLen.f16.iXLen(iXLen 1, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
2705 ret <vscale x 32 x i16> %0
2708 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.se.nxv32i16.iXLen.f16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, half, iXLen)
2710 define <vscale x 1 x i32> @test_sf_vc_v_fvv_se_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
2711 ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32mf2:
2712 ; CHECK: # %bb.0: # %entry
2713 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
2714 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
2717 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.iXLen.f32.iXLen(iXLen 1, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
2718 ret <vscale x 1 x i32> %0
2721 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv1i32.iXLen.f32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, float, iXLen)
2723 define <vscale x 2 x i32> @test_sf_vc_v_fvv_se_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
2724 ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m1:
2725 ; CHECK: # %bb.0: # %entry
2726 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
2727 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
2730 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.iXLen.f32.iXLen(iXLen 1, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
2731 ret <vscale x 2 x i32> %0
2734 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv2i32.iXLen.f32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, float, iXLen)
2736 define <vscale x 4 x i32> @test_sf_vc_v_fvv_se_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
2737 ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m2:
2738 ; CHECK: # %bb.0: # %entry
2739 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
2740 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
2743 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.iXLen.f32.iXLen(iXLen 1, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
2744 ret <vscale x 4 x i32> %0
2747 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv4i32.iXLen.f32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, float, iXLen)
2749 define <vscale x 8 x i32> @test_sf_vc_v_fvv_se_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
2750 ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m4:
2751 ; CHECK: # %bb.0: # %entry
2752 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
2753 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
2756 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.iXLen.f32.iXLen(iXLen 1, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
2757 ret <vscale x 8 x i32> %0
2760 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv8i32.iXLen.f32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, float, iXLen)
2762 define <vscale x 16 x i32> @test_sf_vc_v_fvv_se_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl) {
2763 ; CHECK-LABEL: test_sf_vc_v_fvv_se_e32m8:
2764 ; CHECK: # %bb.0: # %entry
2765 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
2766 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0
2769 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.iXLen.f32.iXLen(iXLen 1, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl)
2770 ret <vscale x 16 x i32> %0
2773 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.se.nxv16i32.iXLen.f32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, float, iXLen)
2775 define <vscale x 1 x i64> @test_sf_vc_v_fvv_se_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl) {
2776 ; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m1:
2777 ; CHECK: # %bb.0: # %entry
2778 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
2779 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
2782 %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.iXLen.f64.iXLen(iXLen 1, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl)
2783 ret <vscale x 1 x i64> %0
2786 declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv1i64.iXLen.f64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, double, iXLen)
2788 define <vscale x 2 x i64> @test_sf_vc_v_fvv_se_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl) {
2789 ; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m2:
2790 ; CHECK: # %bb.0: # %entry
2791 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
2792 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
2795 %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.iXLen.f64.iXLen(iXLen 1, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl)
2796 ret <vscale x 2 x i64> %0
2799 declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv2i64.iXLen.f64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, double, iXLen)
2801 define <vscale x 4 x i64> @test_sf_vc_v_fvv_se_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl) {
2802 ; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m4:
2803 ; CHECK: # %bb.0: # %entry
2804 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
2805 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
2808 %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.iXLen.f64.iXLen(iXLen 1, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl)
2809 ret <vscale x 4 x i64> %0
2812 declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv4i64.iXLen.f64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, double, iXLen)
2814 define <vscale x 8 x i64> @test_sf_vc_v_fvv_se_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl) {
2815 ; CHECK-LABEL: test_sf_vc_v_fvv_se_e64m8:
2816 ; CHECK: # %bb.0: # %entry
2817 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
2818 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0
2821 %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.iXLen.f64.iXLen(iXLen 1, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl)
2822 ret <vscale x 8 x i64> %0
2825 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.se.nxv8i64.iXLen.f64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, double, iXLen)
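; sf.vc.v.fvv: value-returning scalar-FP form without the .se suffix.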
2827 define <vscale x 1 x i16> @test_sf_vc_v_fvv_e16mf4(<vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
2828 ; CHECK-LABEL: test_sf_vc_v_fvv_e16mf4:
2829 ; CHECK: # %bb.0: # %entry
2830 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
2831 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
2834 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.iXLen.f16.iXLen(iXLen 1, <vscale x 1 x i16> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
2835 ret <vscale x 1 x i16> %0
2838 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fvv.nxv1i16.iXLen.f16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, half, iXLen)
2840 define <vscale x 2 x i16> @test_sf_vc_v_fvv_e16mf2(<vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
2841 ; CHECK-LABEL: test_sf_vc_v_fvv_e16mf2:
2842 ; CHECK: # %bb.0: # %entry
2843 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
2844 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
2847 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.iXLen.f16.iXLen(iXLen 1, <vscale x 2 x i16> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
2848 ret <vscale x 2 x i16> %0
2851 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fvv.nxv2i16.iXLen.f16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, half, iXLen)
2853 define <vscale x 4 x i16> @test_sf_vc_v_fvv_e16m1(<vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
2854 ; CHECK-LABEL: test_sf_vc_v_fvv_e16m1:
2855 ; CHECK: # %bb.0: # %entry
2856 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
2857 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
2860 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.iXLen.f16.iXLen(iXLen 1, <vscale x 4 x i16> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
2861 ret <vscale x 4 x i16> %0
2864 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fvv.nxv4i16.iXLen.f16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, half, iXLen)
2866 define <vscale x 8 x i16> @test_sf_vc_v_fvv_e16m2(<vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
2867 ; CHECK-LABEL: test_sf_vc_v_fvv_e16m2:
2868 ; CHECK: # %bb.0: # %entry
2869 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
2870 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
2873 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.iXLen.f16.iXLen(iXLen 1, <vscale x 8 x i16> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
2874 ret <vscale x 8 x i16> %0
2877 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fvv.nxv8i16.iXLen.f16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, half, iXLen)
2879 define <vscale x 16 x i16> @test_sf_vc_v_fvv_e16m4(<vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
2880 ; CHECK-LABEL: test_sf_vc_v_fvv_e16m4:
2881 ; CHECK: # %bb.0: # %entry
2882 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
2883 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
2886 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.iXLen.f16.iXLen(iXLen 1, <vscale x 16 x i16> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
2887 ret <vscale x 16 x i16> %0
2890 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fvv.nxv16i16.iXLen.f16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, half, iXLen)
2892 define <vscale x 32 x i16> @test_sf_vc_v_fvv_e16m8(<vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
2893 ; CHECK-LABEL: test_sf_vc_v_fvv_e16m8:
2894 ; CHECK: # %bb.0: # %entry
2895 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
2896 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0
2899 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.iXLen.f16.iXLen(iXLen 1, <vscale x 32 x i16> %vd, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
2900 ret <vscale x 32 x i16> %0
2903 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fvv.nxv32i16.iXLen.f16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, half, iXLen)
2905 define <vscale x 1 x i32> @test_sf_vc_v_fvv_e32mf2(<vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
2906 ; CHECK-LABEL: test_sf_vc_v_fvv_e32mf2:
2907 ; CHECK: # %bb.0: # %entry
2908 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
2909 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
2912 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.nxv1i32.iXLen.f32.iXLen(iXLen 1, <vscale x 1 x i32> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
2913 ret <vscale x 1 x i32> %0
2916 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvv.nxv1i32.iXLen.f32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, float, iXLen)
2918 define <vscale x 2 x i32> @test_sf_vc_v_fvv_e32m1(<vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
2919 ; CHECK-LABEL: test_sf_vc_v_fvv_e32m1:
2920 ; CHECK: # %bb.0: # %entry
2921 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
2922 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
2925 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.nxv2i32.iXLen.f32.iXLen(iXLen 1, <vscale x 2 x i32> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
2926 ret <vscale x 2 x i32> %0
2929 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvv.nxv2i32.iXLen.f32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, float, iXLen)
2931 define <vscale x 4 x i32> @test_sf_vc_v_fvv_e32m2(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
2932 ; CHECK-LABEL: test_sf_vc_v_fvv_e32m2:
2933 ; CHECK: # %bb.0: # %entry
2934 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
2935 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
2938 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.nxv4i32.iXLen.f32.iXLen(iXLen 1, <vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
2939 ret <vscale x 4 x i32> %0
2942 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvv.nxv4i32.iXLen.f32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, float, iXLen)
2944 define <vscale x 8 x i32> @test_sf_vc_v_fvv_e32m4(<vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
2945 ; CHECK-LABEL: test_sf_vc_v_fvv_e32m4:
2946 ; CHECK: # %bb.0: # %entry
2947 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
2948 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
2951 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.nxv8i32.iXLen.f32.iXLen(iXLen 1, <vscale x 8 x i32> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
2952 ret <vscale x 8 x i32> %0
2955 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvv.nxv8i32.iXLen.f32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, float, iXLen)
2957 define <vscale x 16 x i32> @test_sf_vc_v_fvv_e32m8(<vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl) {
2958 ; CHECK-LABEL: test_sf_vc_v_fvv_e32m8:
2959 ; CHECK: # %bb.0: # %entry
2960 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
2961 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0
2964 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.nxv16i32.iXLen.f32.iXLen(iXLen 1, <vscale x 16 x i32> %vd, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl)
2965 ret <vscale x 16 x i32> %0
2968 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvv.nxv16i32.iXLen.f32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, float, iXLen)
2970 define <vscale x 1 x i64> @test_sf_vc_v_fvv_e64m1(<vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl) {
2971 ; CHECK-LABEL: test_sf_vc_v_fvv_e64m1:
2972 ; CHECK: # %bb.0: # %entry
2973 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
2974 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
2977 %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.nxv1i64.iXLen.f64.iXLen(iXLen 1, <vscale x 1 x i64> %vd, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl)
2978 ret <vscale x 1 x i64> %0
2981 declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvv.nxv1i64.iXLen.f64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, double, iXLen)
2983 define <vscale x 2 x i64> @test_sf_vc_v_fvv_e64m2(<vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl) {
2984 ; CHECK-LABEL: test_sf_vc_v_fvv_e64m2:
2985 ; CHECK: # %bb.0: # %entry
2986 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
2987 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
2990 %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.nxv2i64.iXLen.f64.iXLen(iXLen 1, <vscale x 2 x i64> %vd, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl)
2991 ret <vscale x 2 x i64> %0
2994 declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvv.nxv2i64.iXLen.f64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, double, iXLen)
2996 define <vscale x 4 x i64> @test_sf_vc_v_fvv_e64m4(<vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl) {
2997 ; CHECK-LABEL: test_sf_vc_v_fvv_e64m4:
2998 ; CHECK: # %bb.0: # %entry
2999 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
3000 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
3003 %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.nxv4i64.iXLen.f64.iXLen(iXLen 1, <vscale x 4 x i64> %vd, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl)
3004 ret <vscale x 4 x i64> %0
3007 declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvv.nxv4i64.iXLen.f64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, double, iXLen)
3009 define <vscale x 8 x i64> @test_sf_vc_v_fvv_e64m8(<vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl) {
3010 ; CHECK-LABEL: test_sf_vc_v_fvv_e64m8:
3011 ; CHECK: # %bb.0: # %entry
3012 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
3013 ; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0
3016 %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.nxv8i64.iXLen.f64.iXLen(iXLen 1, <vscale x 8 x i64> %vd, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl)
3017 ret <vscale x 8 x i64> %0
3020 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvv.nxv8i64.iXLen.f64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, double, iXLen)
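; sf.vc.vvv.se with floating-point element types (half, float, double); these
; select the same sf.vc.vvv instruction as the integer-typed tests above. At
; LMUL=8 the vs1 operand is passed indirectly and loaded with vl8reN.v first.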
3022 define void @test_f_sf_vc_vvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
3023 ; CHECK-LABEL: test_f_sf_vc_vvv_se_e16mf4:
3024 ; CHECK: # %bb.0: # %entry
3025 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
3026 ; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
3029 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f16.nxv1f16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
3033 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f16.nxv1f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
3035 define void @test_f_sf_vc_vvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
3036 ; CHECK-LABEL: test_f_sf_vc_vvv_se_e16mf2:
3037 ; CHECK: # %bb.0: # %entry
3038 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
3039 ; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
3042 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f16.nxv2f16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
3046 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f16.nxv2f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
3048 define void @test_f_sf_vc_vvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
3049 ; CHECK-LABEL: test_f_sf_vc_vvv_se_e16m1:
3050 ; CHECK: # %bb.0: # %entry
3051 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
3052 ; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
3055 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f16.nxv4f16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
3059 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f16.nxv4f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
3061 define void @test_f_sf_vc_vvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
3062 ; CHECK-LABEL: test_f_sf_vc_vvv_se_e16m2:
3063 ; CHECK: # %bb.0: # %entry
3064 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
3065 ; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12
3068 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f16.nxv8f16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
3072 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f16.nxv8f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
3074 define void @test_f_sf_vc_vvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
3075 ; CHECK-LABEL: test_f_sf_vc_vvv_se_e16m4:
3076 ; CHECK: # %bb.0: # %entry
3077 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
3078 ; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16
3081 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f16.nxv16f16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
3085 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f16.nxv16f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
3087 define void @test_f_sf_vc_vvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl) {
3088 ; CHECK-LABEL: test_f_sf_vc_vvv_se_e16m8:
3089 ; CHECK: # %bb.0: # %entry
3090 ; CHECK-NEXT: vl8re16.v v24, (a0)
3091 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
3092 ; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24
3095 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32f16.nxv32f16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl)
3099 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv32f16.nxv32f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x half>, iXLen)
3101 define void @test_f_sf_vc_vvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
3102 ; CHECK-LABEL: test_f_sf_vc_vvv_se_e32mf2:
3103 ; CHECK: # %bb.0: # %entry
3104 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
3105 ; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
3108 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f32.nxv1f32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
3112 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f32.nxv1f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
3114 define void @test_f_sf_vc_vvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
3115 ; CHECK-LABEL: test_f_sf_vc_vvv_se_e32m1:
3116 ; CHECK: # %bb.0: # %entry
3117 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
3118 ; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
3121 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f32.nxv2f32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
3125 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f32.nxv2f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
3127 define void @test_f_sf_vc_vvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
3128 ; CHECK-LABEL: test_f_sf_vc_vvv_se_e32m2:
3129 ; CHECK: # %bb.0: # %entry
3130 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
3131 ; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12
3134 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f32.nxv4f32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
3138 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f32.nxv4f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
3140 define void @test_f_sf_vc_vvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
3141 ; CHECK-LABEL: test_f_sf_vc_vvv_se_e32m4:
3142 ; CHECK: # %bb.0: # %entry
3143 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
3144 ; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16
3147 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f32.nxv8f32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
3151 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f32.nxv8f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
3153 define void @test_f_sf_vc_vvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl) {
3154 ; CHECK-LABEL: test_f_sf_vc_vvv_se_e32m8:
3155 ; CHECK: # %bb.0: # %entry
3156 ; CHECK-NEXT: vl8re32.v v24, (a0)
3157 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
3158 ; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24
3161 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f32.nxv16f32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl)
3165 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv16f32.nxv16f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, iXLen)
3167 define void @test_f_sf_vc_vvv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl) {
3168 ; CHECK-LABEL: test_f_sf_vc_vvv_se_e64m1:
3169 ; CHECK: # %bb.0: # %entry
3170 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
3171 ; CHECK-NEXT: sf.vc.vvv 3, v8, v9, v10
3174 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f64.nxv1f64.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl)
3178 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv1f64.nxv1f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, iXLen)
3180 define void @test_f_sf_vc_vvv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl) {
3181 ; CHECK-LABEL: test_f_sf_vc_vvv_se_e64m2:
3182 ; CHECK: # %bb.0: # %entry
3183 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
3184 ; CHECK-NEXT: sf.vc.vvv 3, v8, v10, v12
3187 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f64.nxv2f64.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl)
3191 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv2f64.nxv2f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, iXLen)
3193 define void @test_f_sf_vc_vvv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl) {
3194 ; CHECK-LABEL: test_f_sf_vc_vvv_se_e64m4:
3195 ; CHECK: # %bb.0: # %entry
3196 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
3197 ; CHECK-NEXT: sf.vc.vvv 3, v8, v12, v16
3200 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f64.nxv4f64.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl)
3204 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv4f64.nxv4f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x double>, iXLen)
3206 define void @test_f_sf_vc_vvv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl) {
3207 ; CHECK-LABEL: test_f_sf_vc_vvv_se_e64m8:
3208 ; CHECK: # %bb.0: # %entry
3209 ; CHECK-NEXT: vl8re64.v v24, (a0)
3210 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
3211 ; CHECK-NEXT: sf.vc.vvv 3, v8, v16, v24
3214 tail call void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f64.nxv8f64.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl)
3218 declare void @llvm.riscv.sf.vc.vvv.se.iXLen.nxv8f64.nxv8f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x double>, iXLen)
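; sf.vc.v.vvv.se with floating-point element types, returning the destination
; vector.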
3220 define <vscale x 1 x half> @test_f_sf_vc_v_vvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
3221 ; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16mf4:
3222 ; CHECK: # %bb.0: # %entry
3223 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
3224 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
3227 %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
3228 ret <vscale x 1 x half> %0
3231 declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
3233 define <vscale x 2 x half> @test_f_sf_vc_v_vvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
3234 ; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16mf2:
3235 ; CHECK: # %bb.0: # %entry
3236 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
3237 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
3240 %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
3241 ret <vscale x 2 x half> %0
3244 declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
3246 define <vscale x 4 x half> @test_f_sf_vc_v_vvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
3247 ; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16m1:
3248 ; CHECK: # %bb.0: # %entry
3249 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
3250 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
3253 %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
3254 ret <vscale x 4 x half> %0
3257 declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
3259 define <vscale x 8 x half> @test_f_sf_vc_v_vvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
3260 ; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16m2:
3261 ; CHECK: # %bb.0: # %entry
3262 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
3263 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
3266 %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
3267 ret <vscale x 8 x half> %0
3270 declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
3272 define <vscale x 16 x half> @test_f_sf_vc_v_vvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
3273 ; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16m4:
3274 ; CHECK: # %bb.0: # %entry
3275 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
3276 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
3279 %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
3280 ret <vscale x 16 x half> %0
3283 declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
3285 define <vscale x 32 x half> @test_f_sf_vc_v_vvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl) {
3286 ; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e16m8:
3287 ; CHECK: # %bb.0: # %entry
3288 ; CHECK-NEXT: vl8re16.v v24, (a0)
3289 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
3290 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
3293 %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl)
3294 ret <vscale x 32 x half> %0
3297 declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x half>, iXLen)
3299 define <vscale x 1 x float> @test_f_sf_vc_v_vvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
3300 ; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e32mf2:
3301 ; CHECK: # %bb.0: # %entry
3302 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
3303 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
3306 %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
3307 ret <vscale x 1 x float> %0
3310 declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
3312 define <vscale x 2 x float> @test_f_sf_vc_v_vvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
3313 ; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e32m1:
3314 ; CHECK: # %bb.0: # %entry
3315 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
3316 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
3319 %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
3320 ret <vscale x 2 x float> %0
3323 declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
3325 define <vscale x 4 x float> @test_f_sf_vc_v_vvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
3326 ; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e32m2:
3327 ; CHECK: # %bb.0: # %entry
3328 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
3329 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
3332 %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
3333 ret <vscale x 4 x float> %0
3336 declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
3338 define <vscale x 8 x float> @test_f_sf_vc_v_vvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
3339 ; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e32m4:
3340 ; CHECK: # %bb.0: # %entry
3341 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
3342 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
3345 %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
3346 ret <vscale x 8 x float> %0
3349 declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
3351 define <vscale x 16 x float> @test_f_sf_vc_v_vvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl) {
3352 ; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e32m8:
3353 ; CHECK: # %bb.0: # %entry
3354 ; CHECK-NEXT: vl8re32.v v24, (a0)
3355 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
3356 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
3359 %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl)
3360 ret <vscale x 16 x float> %0
3363 declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, iXLen)
3365 define <vscale x 1 x double> @test_f_sf_vc_v_vvv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl) {
3366 ; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e64m1:
3367 ; CHECK: # %bb.0: # %entry
3368 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
3369 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
3372 %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl)
3373 ret <vscale x 1 x double> %0
3376 declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, iXLen)
3378 define <vscale x 2 x double> @test_f_sf_vc_v_vvv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl) {
3379 ; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e64m2:
3380 ; CHECK: # %bb.0: # %entry
3381 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
3382 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
3385 %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl)
3386 ret <vscale x 2 x double> %0
3389 declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, iXLen)
3391 define <vscale x 4 x double> @test_f_sf_vc_v_vvv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl) {
3392 ; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e64m4:
3393 ; CHECK: # %bb.0: # %entry
3394 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
3395 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
3398 %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl)
3399 ret <vscale x 4 x double> %0
3402 declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x double>, iXLen)
3404 define <vscale x 8 x double> @test_f_sf_vc_v_vvv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl) {
3405 ; CHECK-LABEL: test_f_sf_vc_v_vvv_se_e64m8:
3406 ; CHECK: # %bb.0: # %entry
3407 ; CHECK-NEXT: vl8re64.v v24, (a0)
3408 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
3409 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
3412 %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl)
3413 ret <vscale x 8 x double> %0
3416 declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x double>, iXLen)
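; sf.vc.v.vvv with floating-point element types (no .se suffix).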
3418 define <vscale x 1 x half> @test_f_sf_vc_v_vvv_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
3419 ; CHECK-LABEL: test_f_sf_vc_v_vvv_e16mf4:
3420 ; CHECK: # %bb.0: # %entry
3421 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
3422 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
3425 %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.nxv1f16.iXLen.nxv1f16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
3426 ret <vscale x 1 x half> %0
3429 declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.vvv.nxv1f16.iXLen.nxv1f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
3431 define <vscale x 2 x half> @test_f_sf_vc_v_vvv_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
3432 ; CHECK-LABEL: test_f_sf_vc_v_vvv_e16mf2:
3433 ; CHECK: # %bb.0: # %entry
3434 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
3435 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
3438 %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.nxv2f16.iXLen.nxv2f16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
3439 ret <vscale x 2 x half> %0
3442 declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.vvv.nxv2f16.iXLen.nxv2f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
3444 define <vscale x 4 x half> @test_f_sf_vc_v_vvv_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
3445 ; CHECK-LABEL: test_f_sf_vc_v_vvv_e16m1:
3446 ; CHECK: # %bb.0: # %entry
3447 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
3448 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
3451 %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.nxv4f16.iXLen.nxv4f16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
3452 ret <vscale x 4 x half> %0
3455 declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.vvv.nxv4f16.iXLen.nxv4f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
3457 define <vscale x 8 x half> @test_f_sf_vc_v_vvv_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
3458 ; CHECK-LABEL: test_f_sf_vc_v_vvv_e16m2:
3459 ; CHECK: # %bb.0: # %entry
3460 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
3461 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
3464 %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.nxv8f16.iXLen.nxv8f16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
3465 ret <vscale x 8 x half> %0
3468 declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.vvv.nxv8f16.iXLen.nxv8f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
3470 define <vscale x 16 x half> @test_f_sf_vc_v_vvv_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
3471 ; CHECK-LABEL: test_f_sf_vc_v_vvv_e16m4:
3472 ; CHECK: # %bb.0: # %entry
3473 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
3474 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
3477 %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.nxv16f16.iXLen.nxv16f16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
3478 ret <vscale x 16 x half> %0
3481 declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.vvv.nxv16f16.iXLen.nxv16f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
3483 define <vscale x 32 x half> @test_f_sf_vc_v_vvv_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl) {
3484 ; CHECK-LABEL: test_f_sf_vc_v_vvv_e16m8:
3485 ; CHECK: # %bb.0: # %entry
3486 ; CHECK-NEXT: vl8re16.v v24, (a0)
3487 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
3488 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
3491 %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.nxv32f16.iXLen.nxv32f16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl)
3492 ret <vscale x 32 x half> %0
3495 declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.vvv.nxv32f16.iXLen.nxv32f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, <vscale x 32 x half>, iXLen)
3497 define <vscale x 1 x float> @test_f_sf_vc_v_vvv_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
3498 ; CHECK-LABEL: test_f_sf_vc_v_vvv_e32mf2:
3499 ; CHECK: # %bb.0: # %entry
3500 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
3501 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
3504 %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.nxv1f32.iXLen.nxv1f32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
3505 ret <vscale x 1 x float> %0
3508 declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvv.nxv1f32.iXLen.nxv1f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
3510 define <vscale x 2 x float> @test_f_sf_vc_v_vvv_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
3511 ; CHECK-LABEL: test_f_sf_vc_v_vvv_e32m1:
3512 ; CHECK: # %bb.0: # %entry
3513 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
3514 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
3517 %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.nxv2f32.iXLen.nxv2f32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
3518 ret <vscale x 2 x float> %0
3521 declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvv.nxv2f32.iXLen.nxv2f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
3523 define <vscale x 4 x float> @test_f_sf_vc_v_vvv_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
3524 ; CHECK-LABEL: test_f_sf_vc_v_vvv_e32m2:
3525 ; CHECK: # %bb.0: # %entry
3526 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
3527 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
3530 %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.nxv4f32.iXLen.nxv4f32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
3531 ret <vscale x 4 x float> %0
3534 declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvv.nxv4f32.iXLen.nxv4f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
3536 define <vscale x 8 x float> @test_f_sf_vc_v_vvv_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
3537 ; CHECK-LABEL: test_f_sf_vc_v_vvv_e32m4:
3538 ; CHECK: # %bb.0: # %entry
3539 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
3540 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
3543 %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.nxv8f32.iXLen.nxv8f32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
3544 ret <vscale x 8 x float> %0
3547 declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvv.nxv8f32.iXLen.nxv8f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
3549 define <vscale x 16 x float> @test_f_sf_vc_v_vvv_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl) {
3550 ; CHECK-LABEL: test_f_sf_vc_v_vvv_e32m8:
3551 ; CHECK: # %bb.0: # %entry
3552 ; CHECK-NEXT: vl8re32.v v24, (a0)
3553 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
3554 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
3557 %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.nxv16f32.iXLen.nxv16f32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl)
3558 ret <vscale x 16 x float> %0
3561 declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvv.nxv16f32.iXLen.nxv16f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, <vscale x 16 x float>, iXLen)
3563 define <vscale x 1 x double> @test_f_sf_vc_v_vvv_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl) {
3564 ; CHECK-LABEL: test_f_sf_vc_v_vvv_e64m1:
3565 ; CHECK: # %bb.0: # %entry
3566 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
3567 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v9, v10
3570 %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.nxv1f64.iXLen.nxv1f64.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl)
3571 ret <vscale x 1 x double> %0
3574 declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvv.nxv1f64.iXLen.nxv1f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, <vscale x 1 x double>, iXLen)
3576 define <vscale x 2 x double> @test_f_sf_vc_v_vvv_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl) {
3577 ; CHECK-LABEL: test_f_sf_vc_v_vvv_e64m2:
3578 ; CHECK: # %bb.0: # %entry
3579 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
3580 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v10, v12
3583 %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.nxv2f64.iXLen.nxv2f64.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl)
3584 ret <vscale x 2 x double> %0
3587 declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvv.nxv2f64.iXLen.nxv2f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, iXLen)
3589 define <vscale x 4 x double> @test_f_sf_vc_v_vvv_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl) {
3590 ; CHECK-LABEL: test_f_sf_vc_v_vvv_e64m4:
3591 ; CHECK: # %bb.0: # %entry
3592 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
3593 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v12, v16
3596 %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.nxv4f64.iXLen.nxv4f64.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl)
3597 ret <vscale x 4 x double> %0
3600 declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvv.nxv4f64.iXLen.nxv4f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, <vscale x 4 x double>, iXLen)
3602 define <vscale x 8 x double> @test_f_sf_vc_v_vvv_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl) {
3603 ; CHECK-LABEL: test_f_sf_vc_v_vvv_e64m8:
3604 ; CHECK: # %bb.0: # %entry
3605 ; CHECK-NEXT: vl8re64.v v24, (a0)
3606 ; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
3607 ; CHECK-NEXT: sf.vc.v.vvv 3, v8, v16, v24
3610 %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.nxv8f64.iXLen.nxv8f64.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl)
3611 ret <vscale x 8 x double> %0
3614 declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvv.nxv8f64.iXLen.nxv8f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x double>, iXLen)
3616 define void @test_f_sf_vc_xvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3617 ; CHECK-LABEL: test_f_sf_vc_xvv_se_e16mf4:
3618 ; CHECK: # %bb.0: # %entry
3619 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3620 ; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
3623 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f16.f16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
3627 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv1f16.f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, i16, iXLen)
3629 define void @test_f_sf_vc_xvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3630 ; CHECK-LABEL: test_f_sf_vc_xvv_se_e16mf2:
3631 ; CHECK: # %bb.0: # %entry
3632 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3633 ; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
3636 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f16.f16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
3640 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv2f16.f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, i16, iXLen)
3642 define void @test_f_sf_vc_xvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3643 ; CHECK-LABEL: test_f_sf_vc_xvv_se_e16m1:
3644 ; CHECK: # %bb.0: # %entry
3645 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3646 ; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
3649 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f16.f16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
3653 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv4f16.f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, i16, iXLen)
3655 define void @test_f_sf_vc_xvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3656 ; CHECK-LABEL: test_f_sf_vc_xvv_se_e16m2:
3657 ; CHECK: # %bb.0: # %entry
3658 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3659 ; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0
3662 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f16.f16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
3666 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv8f16.f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, i16, iXLen)
3668 define void @test_f_sf_vc_xvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3669 ; CHECK-LABEL: test_f_sf_vc_xvv_se_e16m4:
3670 ; CHECK: # %bb.0: # %entry
3671 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
3672 ; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0
3675 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f16.f16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
3679 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv16f16.f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, i16, iXLen)
3681 define void @test_f_sf_vc_xvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3682 ; CHECK-LABEL: test_f_sf_vc_xvv_se_e16m8:
3683 ; CHECK: # %bb.0: # %entry
3684 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
3685 ; CHECK-NEXT: sf.vc.xvv 3, v8, v16, a0
3688 tail call void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32f16.f16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 %rs1, iXLen %vl)
3692 declare void @llvm.riscv.sf.vc.xvv.se.iXLen.nxv32f16.f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, i16, iXLen)
3694 define void @test_f_sf_vc_xvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3695 ; CHECK-LABEL: test_f_sf_vc_xvv_se_e32mf2:
3696 ; CHECK: # %bb.0: # %entry
3697 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3698 ; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
3701 tail call void @llvm.riscv.sf.vc.xvv.se.f32.nxv1f32.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
3705 declare void @llvm.riscv.sf.vc.xvv.se.f32.nxv1f32.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, i32, iXLen)
3707 define void @test_f_sf_vc_xvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3708 ; CHECK-LABEL: test_f_sf_vc_xvv_se_e32m1:
3709 ; CHECK: # %bb.0: # %entry
3710 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
3711 ; CHECK-NEXT: sf.vc.xvv 3, v8, v9, a0
3714 tail call void @llvm.riscv.sf.vc.xvv.se.f32.nxv2f32.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
3718 declare void @llvm.riscv.sf.vc.xvv.se.f32.nxv2f32.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, i32, iXLen)
3720 define void @test_f_sf_vc_xvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3721 ; CHECK-LABEL: test_f_sf_vc_xvv_se_e32m2:
3722 ; CHECK: # %bb.0: # %entry
3723 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
3724 ; CHECK-NEXT: sf.vc.xvv 3, v8, v10, a0
3727 tail call void @llvm.riscv.sf.vc.xvv.se.f32.nxv4f32.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
3731 declare void @llvm.riscv.sf.vc.xvv.se.f32.nxv4f32.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, i32, iXLen)
3733 define void @test_f_sf_vc_xvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3734 ; CHECK-LABEL: test_f_sf_vc_xvv_se_e32m4:
3735 ; CHECK: # %bb.0: # %entry
3736 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
3737 ; CHECK-NEXT: sf.vc.xvv 3, v8, v12, a0
3740 tail call void @llvm.riscv.sf.vc.xvv.se.f32.nxv8f32.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
3744 declare void @llvm.riscv.sf.vc.xvv.se.f32.nxv8f32.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, i32, iXLen)
3746 define void @test_f_sf_vc_xvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3747 ; CHECK-LABEL: test_f_sf_vc_xvv_se_e32m8:
3748 ; CHECK: # %bb.0: # %entry
3749 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
3750 ; CHECK-NEXT: sf.vc.xvv 3, v8, v16, a0
3753 tail call void @llvm.riscv.sf.vc.xvv.se.f32.nxv16f32.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl)
3757 declare void @llvm.riscv.sf.vc.xvv.se.f32.nxv16f32.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, i32, iXLen)
3759 define <vscale x 1 x half> @test_f_sf_vc_v_xvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3760 ; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16mf4:
3761 ; CHECK: # %bb.0: # %entry
3762 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3763 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
3766 %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv1f16.iXLen.f16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
3767 ret <vscale x 1 x half> %0
3770 declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv1f16.iXLen.f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, i16, iXLen)
3772 define <vscale x 2 x half> @test_f_sf_vc_v_xvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3773 ; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16mf2:
3774 ; CHECK: # %bb.0: # %entry
3775 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3776 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
3779 %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv2f16.iXLen.f16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
3780 ret <vscale x 2 x half> %0
3783 declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv2f16.iXLen.f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, i16, iXLen)
3785 define <vscale x 4 x half> @test_f_sf_vc_v_xvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3786 ; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16m1:
3787 ; CHECK: # %bb.0: # %entry
3788 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3789 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
3792 %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv4f16.iXLen.f16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
3793 ret <vscale x 4 x half> %0
3796 declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv4f16.iXLen.f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, i16, iXLen)
3798 define <vscale x 8 x half> @test_f_sf_vc_v_xvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3799 ; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16m2:
3800 ; CHECK: # %bb.0: # %entry
3801 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3802 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
3805 %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv8f16.iXLen.f16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
3806 ret <vscale x 8 x half> %0
3809 declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv8f16.iXLen.f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, i16, iXLen)
3811 define <vscale x 16 x half> @test_f_sf_vc_v_xvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3812 ; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16m4:
3813 ; CHECK: # %bb.0: # %entry
3814 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
3815 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
3818 %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv16f16.iXLen.f16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
3819 ret <vscale x 16 x half> %0
3822 declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv16f16.iXLen.f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, i16, iXLen)
3824 define <vscale x 32 x half> @test_f_sf_vc_v_xvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3825 ; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e16m8:
3826 ; CHECK: # %bb.0: # %entry
3827 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
3828 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
3831 %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv32f16.iXLen.f16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 %rs1, iXLen %vl)
3832 ret <vscale x 32 x half> %0
3835 declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.se.nxv32f16.iXLen.f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, i16, iXLen)
3837 define <vscale x 1 x float> @test_f_sf_vc_v_xvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3838 ; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e32mf2:
3839 ; CHECK: # %bb.0: # %entry
3840 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3841 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
3844 %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv1f32.iXLen.f32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
3845 ret <vscale x 1 x float> %0
3848 declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv1f32.iXLen.f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, i32, iXLen)
3850 define <vscale x 2 x float> @test_f_sf_vc_v_xvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3851 ; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e32m1:
3852 ; CHECK: # %bb.0: # %entry
3853 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
3854 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
3857 %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv2f32.iXLen.f32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
3858 ret <vscale x 2 x float> %0
3861 declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv2f32.iXLen.f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, i32, iXLen)
3863 define <vscale x 4 x float> @test_f_sf_vc_v_xvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3864 ; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e32m2:
3865 ; CHECK: # %bb.0: # %entry
3866 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
3867 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
3870 %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.iXLen.f32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
3871 ret <vscale x 4 x float> %0
3874 declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv4f32.iXLen.f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, i32, iXLen)
3876 define <vscale x 8 x float> @test_f_sf_vc_v_xvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3877 ; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e32m4:
3878 ; CHECK: # %bb.0: # %entry
3879 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
3880 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
3883 %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv8f32.iXLen.f32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
3884 ret <vscale x 8 x float> %0
3887 declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv8f32.iXLen.f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, i32, iXLen)
3889 define <vscale x 16 x float> @test_f_sf_vc_v_xvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3890 ; CHECK-LABEL: test_f_sf_vc_v_xvv_se_e32m8:
3891 ; CHECK: # %bb.0: # %entry
3892 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
3893 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
3896 %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv16f32.iXLen.f32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl)
3897 ret <vscale x 16 x float> %0
3900 declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.se.nxv16f32.iXLen.f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, i32, iXLen)
3902 define <vscale x 1 x half> @test_f_sf_vc_v_xvv_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3903 ; CHECK-LABEL: test_f_sf_vc_v_xvv_e16mf4:
3904 ; CHECK: # %bb.0: # %entry
3905 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3906 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
3909 %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.nxv1f16.iXLen.f16.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
3910 ret <vscale x 1 x half> %0
3913 declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.xvv.nxv1f16.iXLen.f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, i16, iXLen)
3915 define <vscale x 2 x half> @test_f_sf_vc_v_xvv_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3916 ; CHECK-LABEL: test_f_sf_vc_v_xvv_e16mf2:
3917 ; CHECK: # %bb.0: # %entry
3918 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3919 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
3922 %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.nxv2f16.iXLen.f16.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
3923 ret <vscale x 2 x half> %0
3926 declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.xvv.nxv2f16.iXLen.f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, i16, iXLen)
3928 define <vscale x 4 x half> @test_f_sf_vc_v_xvv_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3929 ; CHECK-LABEL: test_f_sf_vc_v_xvv_e16m1:
3930 ; CHECK: # %bb.0: # %entry
3931 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3932 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
3935 %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.nxv4f16.iXLen.f16.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
3936 ret <vscale x 4 x half> %0
3939 declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.xvv.nxv4f16.iXLen.f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, i16, iXLen)
3941 define <vscale x 8 x half> @test_f_sf_vc_v_xvv_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3942 ; CHECK-LABEL: test_f_sf_vc_v_xvv_e16m2:
3943 ; CHECK: # %bb.0: # %entry
3944 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3945 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
3948 %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.nxv8f16.iXLen.f16.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
3949 ret <vscale x 8 x half> %0
3952 declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.xvv.nxv8f16.iXLen.f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, i16, iXLen)
3954 define <vscale x 16 x half> @test_f_sf_vc_v_xvv_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3955 ; CHECK-LABEL: test_f_sf_vc_v_xvv_e16m4:
3956 ; CHECK: # %bb.0: # %entry
3957 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
3958 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
3961 %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.nxv16f16.iXLen.f16.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
3962 ret <vscale x 16 x half> %0
3965 declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.xvv.nxv16f16.iXLen.f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, i16, iXLen)
3967 define <vscale x 32 x half> @test_f_sf_vc_v_xvv_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3968 ; CHECK-LABEL: test_f_sf_vc_v_xvv_e16m8:
3969 ; CHECK: # %bb.0: # %entry
3970 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
3971 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
3974 %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.nxv32f16.iXLen.f16.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, i16 %rs1, iXLen %vl)
3975 ret <vscale x 32 x half> %0
3978 declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.xvv.nxv32f16.iXLen.f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, i16, iXLen)
3980 define <vscale x 1 x float> @test_f_sf_vc_v_xvv_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3981 ; CHECK-LABEL: test_f_sf_vc_v_xvv_e32mf2:
3982 ; CHECK: # %bb.0: # %entry
3983 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3984 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
3987 %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.nxv1f32.iXLen.f32.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
3988 ret <vscale x 1 x float> %0
3991 declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvv.nxv1f32.iXLen.f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, i32, iXLen)
3993 define <vscale x 2 x float> @test_f_sf_vc_v_xvv_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3994 ; CHECK-LABEL: test_f_sf_vc_v_xvv_e32m1:
3995 ; CHECK: # %bb.0: # %entry
3996 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
3997 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v9, a0
4000 %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.nxv2f32.iXLen.f32.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
4001 ret <vscale x 2 x float> %0
4004 declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvv.nxv2f32.iXLen.f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, i32, iXLen)
4006 define <vscale x 4 x float> @test_f_sf_vc_v_xvv_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
4007 ; CHECK-LABEL: test_f_sf_vc_v_xvv_e32m2:
4008 ; CHECK: # %bb.0: # %entry
4009 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
4010 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v10, a0
4013 %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.nxv4f32.iXLen.f32.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
4014 ret <vscale x 4 x float> %0
4017 declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvv.nxv4f32.iXLen.f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, i32, iXLen)
4019 define <vscale x 8 x float> @test_f_sf_vc_v_xvv_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
4020 ; CHECK-LABEL: test_f_sf_vc_v_xvv_e32m4:
4021 ; CHECK: # %bb.0: # %entry
4022 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
4023 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v12, a0
4026 %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.nxv8f32.iXLen.f32.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
4027 ret <vscale x 8 x float> %0
4030 declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvv.nxv8f32.iXLen.f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, i32, iXLen)
4032 define <vscale x 16 x float> @test_f_sf_vc_v_xvv_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 signext %rs1, iXLen %vl) {
4033 ; CHECK-LABEL: test_f_sf_vc_v_xvv_e32m8:
4034 ; CHECK: # %bb.0: # %entry
4035 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
4036 ; CHECK-NEXT: sf.vc.v.xvv 3, v8, v16, a0
4039 %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.nxv16f32.iXLen.f32.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl)
4040 ret <vscale x 16 x float> %0
4043 declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvv.nxv16f32.iXLen.f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, i32, iXLen)
4045 define void @test_f_sf_vc_ivv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, iXLen %vl) {
4046 ; CHECK-LABEL: test_f_sf_vc_ivv_se_e16mf4:
4047 ; CHECK: # %bb.0: # %entry
4048 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
4049 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
4052 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f16.iXLen.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
4056 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f16.iXLen.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, iXLen, iXLen)
4058 define void @test_f_sf_vc_ivv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, iXLen %vl) {
4059 ; CHECK-LABEL: test_f_sf_vc_ivv_se_e16mf2:
4060 ; CHECK: # %bb.0: # %entry
4061 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
4062 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
4065 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f16.iXLen.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
4069 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f16.iXLen.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, iXLen, iXLen)
4071 define void @test_f_sf_vc_ivv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, iXLen %vl) {
4072 ; CHECK-LABEL: test_f_sf_vc_ivv_se_e16m1:
4073 ; CHECK: # %bb.0: # %entry
4074 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
4075 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
4078 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f16.iXLen.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
4082 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f16.iXLen.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, iXLen, iXLen)
4084 define void @test_f_sf_vc_ivv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, iXLen %vl) {
4085 ; CHECK-LABEL: test_f_sf_vc_ivv_se_e16m2:
4086 ; CHECK: # %bb.0: # %entry
4087 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
4088 ; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10
4091 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f16.iXLen.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
4095 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f16.iXLen.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, iXLen, iXLen)
4097 define void @test_f_sf_vc_ivv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, iXLen %vl) {
4098 ; CHECK-LABEL: test_f_sf_vc_ivv_se_e16m4:
4099 ; CHECK: # %bb.0: # %entry
4100 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
4101 ; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10
4104 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f16.iXLen.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
4108 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f16.iXLen.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, iXLen, iXLen)
4110 define void @test_f_sf_vc_ivv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, iXLen %vl) {
4111 ; CHECK-LABEL: test_f_sf_vc_ivv_se_e16m8:
4112 ; CHECK: # %bb.0: # %entry
4113 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
4114 ; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 10
4117 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32f16.iXLen.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, iXLen 10, iXLen %vl)
4121 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv32f16.iXLen.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, iXLen, iXLen)
4123 define void @test_f_sf_vc_ivv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, iXLen %vl) {
4124 ; CHECK-LABEL: test_f_sf_vc_ivv_se_e32mf2:
4125 ; CHECK: # %bb.0: # %entry
4126 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
4127 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
4130 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f32.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
4134 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f32.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, iXLen, iXLen)
4136 define void @test_f_sf_vc_ivv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, iXLen %vl) {
4137 ; CHECK-LABEL: test_f_sf_vc_ivv_se_e32m1:
4138 ; CHECK: # %bb.0: # %entry
4139 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
4140 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
4143 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f32.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
4147 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f32.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, iXLen, iXLen)
4149 define void @test_f_sf_vc_ivv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, iXLen %vl) {
4150 ; CHECK-LABEL: test_f_sf_vc_ivv_se_e32m2:
4151 ; CHECK: # %bb.0: # %entry
4152 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
4153 ; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10
4156 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f32.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
4160 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f32.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, iXLen, iXLen)
4162 define void @test_f_sf_vc_ivv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, iXLen %vl) {
4163 ; CHECK-LABEL: test_f_sf_vc_ivv_se_e32m4:
4164 ; CHECK: # %bb.0: # %entry
4165 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
4166 ; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10
4169 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f32.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
4173 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f32.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, iXLen, iXLen)
4175 define void @test_f_sf_vc_ivv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, iXLen %vl) {
4176 ; CHECK-LABEL: test_f_sf_vc_ivv_se_e32m8:
4177 ; CHECK: # %bb.0: # %entry
4178 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
4179 ; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 10
4182 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f32.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, iXLen 10, iXLen %vl)
4186 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv16f32.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, iXLen, iXLen)
4188 define void @test_f_sf_vc_ivv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, iXLen %vl) {
4189 ; CHECK-LABEL: test_f_sf_vc_ivv_se_e64m1:
4190 ; CHECK: # %bb.0: # %entry
4191 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
4192 ; CHECK-NEXT: sf.vc.ivv 3, v8, v9, 10
4195 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f64.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, iXLen 10, iXLen %vl)
4199 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv1f64.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, iXLen, iXLen)
4201 define void @test_f_sf_vc_ivv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, iXLen %vl) {
4202 ; CHECK-LABEL: test_f_sf_vc_ivv_se_e64m2:
4203 ; CHECK: # %bb.0: # %entry
4204 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
4205 ; CHECK-NEXT: sf.vc.ivv 3, v8, v10, 10
4208 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f64.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, iXLen 10, iXLen %vl)
4212 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv2f64.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, iXLen, iXLen)
4214 define void @test_f_sf_vc_ivv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, iXLen %vl) {
4215 ; CHECK-LABEL: test_f_sf_vc_ivv_se_e64m4:
4216 ; CHECK: # %bb.0: # %entry
4217 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
4218 ; CHECK-NEXT: sf.vc.ivv 3, v8, v12, 10
4221 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f64.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, iXLen 10, iXLen %vl)
4225 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv4f64.iXLen.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, iXLen, iXLen)
4227 define void @test_f_sf_vc_ivv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, iXLen %vl) {
4228 ; CHECK-LABEL: test_f_sf_vc_ivv_se_e64m8:
4229 ; CHECK: # %bb.0: # %entry
4230 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
4231 ; CHECK-NEXT: sf.vc.ivv 3, v8, v16, 10
4234 tail call void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f64.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, iXLen 10, iXLen %vl)
4238 declare void @llvm.riscv.sf.vc.ivv.se.iXLen.nxv8f64.iXLen.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, iXLen, iXLen)
4240 define <vscale x 1 x half> @test_f_sf_vc_v_ivv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, iXLen %vl) {
4241 ; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16mf4:
4242 ; CHECK: # %bb.0: # %entry
4243 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
4244 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
4247 %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv1f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
4248 ret <vscale x 1 x half> %0
4251 declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv1f16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, iXLen, iXLen)
4253 define <vscale x 2 x half> @test_f_sf_vc_v_ivv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, iXLen %vl) {
4254 ; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16mf2:
4255 ; CHECK: # %bb.0: # %entry
4256 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
4257 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
4260 %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv2f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
4261 ret <vscale x 2 x half> %0
4264 declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv2f16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, iXLen, iXLen)
4266 define <vscale x 4 x half> @test_f_sf_vc_v_ivv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, iXLen %vl) {
4267 ; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16m1:
4268 ; CHECK: # %bb.0: # %entry
4269 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
4270 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
4273 %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv4f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
4274 ret <vscale x 4 x half> %0
4277 declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv4f16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, iXLen, iXLen)
4279 define <vscale x 8 x half> @test_f_sf_vc_v_ivv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, iXLen %vl) {
4280 ; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16m2:
4281 ; CHECK: # %bb.0: # %entry
4282 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
4283 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
4286 %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv8f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
4287 ret <vscale x 8 x half> %0
4290 declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv8f16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, iXLen, iXLen)
4292 define <vscale x 16 x half> @test_f_sf_vc_v_ivv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, iXLen %vl) {
4293 ; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16m4:
4294 ; CHECK: # %bb.0: # %entry
4295 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
4296 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
4299 %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv16f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
4300 ret <vscale x 16 x half> %0
4303 declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv16f16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, iXLen, iXLen)
4305 define <vscale x 32 x half> @test_f_sf_vc_v_ivv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, iXLen %vl) {
4306 ; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e16m8:
4307 ; CHECK: # %bb.0: # %entry
4308 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
4309 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
4312 %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv32f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, iXLen 10, iXLen %vl)
4313 ret <vscale x 32 x half> %0
4316 declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.ivv.se.nxv32f16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, iXLen, iXLen)
4318 define <vscale x 1 x float> @test_f_sf_vc_v_ivv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, iXLen %vl) {
4319 ; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e32mf2:
4320 ; CHECK: # %bb.0: # %entry
4321 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
4322 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
4325 %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv1f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
4326 ret <vscale x 1 x float> %0
4329 declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv1f32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, iXLen, iXLen)
4331 define <vscale x 2 x float> @test_f_sf_vc_v_ivv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, iXLen %vl) {
4332 ; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e32m1:
4333 ; CHECK: # %bb.0: # %entry
4334 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
4335 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
4338 %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv2f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
4339 ret <vscale x 2 x float> %0
4342 declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv2f32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, iXLen, iXLen)
4344 define <vscale x 4 x float> @test_f_sf_vc_v_ivv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, iXLen %vl) {
4345 ; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e32m2:
4346 ; CHECK: # %bb.0: # %entry
4347 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
4348 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
4351 %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv4f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
4352 ret <vscale x 4 x float> %0
4355 declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv4f32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, iXLen, iXLen)
4357 define <vscale x 8 x float> @test_f_sf_vc_v_ivv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, iXLen %vl) {
4358 ; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e32m4:
4359 ; CHECK: # %bb.0: # %entry
4360 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
4361 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
4364 %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv8f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
4365 ret <vscale x 8 x float> %0
4368 declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv8f32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, iXLen, iXLen)
4370 define <vscale x 16 x float> @test_f_sf_vc_v_ivv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, iXLen %vl) {
4371 ; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e32m8:
4372 ; CHECK: # %bb.0: # %entry
4373 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
4374 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
4377 %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv16f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, iXLen 10, iXLen %vl)
4378 ret <vscale x 16 x float> %0
4381 declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivv.se.nxv16f32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, iXLen, iXLen)
4383 define <vscale x 1 x double> @test_f_sf_vc_v_ivv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, iXLen %vl) {
4384 ; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e64m1:
4385 ; CHECK: # %bb.0: # %entry
4386 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
4387 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
4390 %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv1f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, iXLen 10, iXLen %vl)
4391 ret <vscale x 1 x double> %0
4394 declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv1f64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, iXLen, iXLen)
4396 define <vscale x 2 x double> @test_f_sf_vc_v_ivv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, iXLen %vl) {
4397 ; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e64m2:
4398 ; CHECK: # %bb.0: # %entry
4399 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
4400 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
4403 %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv2f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, iXLen 10, iXLen %vl)
4404 ret <vscale x 2 x double> %0
4407 declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv2f64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, iXLen, iXLen)
4409 define <vscale x 4 x double> @test_f_sf_vc_v_ivv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, iXLen %vl) {
4410 ; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e64m4:
4411 ; CHECK: # %bb.0: # %entry
4412 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
4413 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
4416 %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv4f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, iXLen 10, iXLen %vl)
4417 ret <vscale x 4 x double> %0
4420 declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv4f64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, iXLen, iXLen)
4422 define <vscale x 8 x double> @test_f_sf_vc_v_ivv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, iXLen %vl) {
4423 ; CHECK-LABEL: test_f_sf_vc_v_ivv_se_e64m8:
4424 ; CHECK: # %bb.0: # %entry
4425 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
4426 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
4429 %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv8f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, iXLen 10, iXLen %vl)
4430 ret <vscale x 8 x double> %0
4433 declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivv.se.nxv8f64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, iXLen, iXLen)
4435 define <vscale x 1 x half> @test_f_sf_vc_v_ivv_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, iXLen %vl) {
4436 ; CHECK-LABEL: test_f_sf_vc_v_ivv_e16mf4:
4437 ; CHECK: # %bb.0: # %entry
4438 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
4439 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
4442 %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.ivv.nxv1f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
4443 ret <vscale x 1 x half> %0
4446 declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.ivv.nxv1f16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, iXLen, iXLen)
4448 define <vscale x 2 x half> @test_f_sf_vc_v_ivv_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, iXLen %vl) {
4449 ; CHECK-LABEL: test_f_sf_vc_v_ivv_e16mf2:
4450 ; CHECK: # %bb.0: # %entry
4451 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
4452 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
4455 %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.ivv.nxv2f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
4456 ret <vscale x 2 x half> %0
4459 declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.ivv.nxv2f16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, iXLen, iXLen)
4461 define <vscale x 4 x half> @test_f_sf_vc_v_ivv_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, iXLen %vl) {
4462 ; CHECK-LABEL: test_f_sf_vc_v_ivv_e16m1:
4463 ; CHECK: # %bb.0: # %entry
4464 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
4465 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
4468 %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.ivv.nxv4f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
4469 ret <vscale x 4 x half> %0
4472 declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.ivv.nxv4f16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, iXLen, iXLen)
4474 define <vscale x 8 x half> @test_f_sf_vc_v_ivv_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, iXLen %vl) {
4475 ; CHECK-LABEL: test_f_sf_vc_v_ivv_e16m2:
4476 ; CHECK: # %bb.0: # %entry
4477 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
4478 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
4481 %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.ivv.nxv8f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
4482 ret <vscale x 8 x half> %0
4485 declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.ivv.nxv8f16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, iXLen, iXLen)
4487 define <vscale x 16 x half> @test_f_sf_vc_v_ivv_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, iXLen %vl) {
4488 ; CHECK-LABEL: test_f_sf_vc_v_ivv_e16m4:
4489 ; CHECK: # %bb.0: # %entry
4490 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
4491 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
4494 %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.ivv.nxv16f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
4495 ret <vscale x 16 x half> %0
4498 declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.ivv.nxv16f16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, iXLen, iXLen)
4500 define <vscale x 32 x half> @test_f_sf_vc_v_ivv_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, iXLen %vl) {
4501 ; CHECK-LABEL: test_f_sf_vc_v_ivv_e16m8:
4502 ; CHECK: # %bb.0: # %entry
4503 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
4504 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
4507 %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.ivv.nxv32f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, iXLen 10, iXLen %vl)
4508 ret <vscale x 32 x half> %0
4511 declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.ivv.nxv32f16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, iXLen, iXLen)
4513 define <vscale x 1 x float> @test_f_sf_vc_v_ivv_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, iXLen %vl) {
4514 ; CHECK-LABEL: test_f_sf_vc_v_ivv_e32mf2:
4515 ; CHECK: # %bb.0: # %entry
4516 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
4517 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
4520 %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivv.nxv1f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
4521 ret <vscale x 1 x float> %0
4524 declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivv.nxv1f32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, iXLen, iXLen)
4526 define <vscale x 2 x float> @test_f_sf_vc_v_ivv_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, iXLen %vl) {
4527 ; CHECK-LABEL: test_f_sf_vc_v_ivv_e32m1:
4528 ; CHECK: # %bb.0: # %entry
4529 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
4530 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
4533 %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivv.nxv2f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
4534 ret <vscale x 2 x float> %0
4537 declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivv.nxv2f32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, iXLen, iXLen)
4539 define <vscale x 4 x float> @test_f_sf_vc_v_ivv_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, iXLen %vl) {
4540 ; CHECK-LABEL: test_f_sf_vc_v_ivv_e32m2:
4541 ; CHECK: # %bb.0: # %entry
4542 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
4543 ; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
4546 %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivv.nxv4f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
4547 ret <vscale x 4 x float> %0
4550 declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivv.nxv4f32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, iXLen, iXLen)
define <vscale x 8 x float> @test_f_sf_vc_v_ivv_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivv_e32m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivv.nxv8f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 8 x float> %0
}

declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivv.nxv8f32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, iXLen, iXLen)

define <vscale x 16 x float> @test_f_sf_vc_v_ivv_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivv_e32m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivv.nxv16f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 16 x float> %0
}

declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivv.nxv16f32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, iXLen, iXLen)

define <vscale x 1 x double> @test_f_sf_vc_v_ivv_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivv_e64m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: sf.vc.v.ivv 3, v8, v9, 10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivv.nxv1f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 1 x double> %0
}

declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivv.nxv1f64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, iXLen, iXLen)

define <vscale x 2 x double> @test_f_sf_vc_v_ivv_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivv_e64m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: sf.vc.v.ivv 3, v8, v10, 10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivv.nxv2f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 2 x double> %0
}

declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivv.nxv2f64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, iXLen, iXLen)

define <vscale x 4 x double> @test_f_sf_vc_v_ivv_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivv_e64m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: sf.vc.v.ivv 3, v8, v12, 10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivv.nxv4f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 4 x double> %0
}

declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivv.nxv4f64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, iXLen, iXLen)

define <vscale x 8 x double> @test_f_sf_vc_v_ivv_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivv_e64m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: sf.vc.v.ivv 3, v8, v16, 10
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivv.nxv8f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 8 x double> %0
}

declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivv.nxv8f64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, iXLen, iXLen)

define void @test_f_sf_vc_fvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvv_se_e16mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f16.f16.iXLen(iXLen 1, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f16.f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, half, iXLen)

define void @test_f_sf_vc_fvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvv_se_e16mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f16.f16.iXLen(iXLen 1, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f16.f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, half, iXLen)

define void @test_f_sf_vc_fvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvv_se_e16m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f16.f16.iXLen(iXLen 1, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f16.f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, half, iXLen)

define void @test_f_sf_vc_fvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvv_se_e16m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f16.f16.iXLen(iXLen 1, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f16.f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, half, iXLen)

define void @test_f_sf_vc_fvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvv_se_e16m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f16.f16.iXLen(iXLen 1, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f16.f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, half, iXLen)

define void @test_f_sf_vc_fvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvv_se_e16m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: sf.vc.fvv 1, v8, v16, fa0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32f16.f16.iXLen(iXLen 1, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv32f16.f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, half, iXLen)

define void @test_f_sf_vc_fvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvv_se_e32mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f32.f32.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f32.f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, float, iXLen)

define void @test_f_sf_vc_fvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvv_se_e32m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f32.f32.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f32.f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, float, iXLen)

define void @test_f_sf_vc_fvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvv_se_e32m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f32.f32.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f32.f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, float, iXLen)

define void @test_f_sf_vc_fvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvv_se_e32m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f32.f32.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f32.f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, float, iXLen)

define void @test_f_sf_vc_fvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvv_se_e32m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: sf.vc.fvv 1, v8, v16, fa0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f32.f32.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv16f32.f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, float, iXLen)

define void @test_f_sf_vc_fvv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvv_se_e64m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: sf.vc.fvv 1, v8, v9, fa0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f64.f64.iXLen(iXLen 1, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv1f64.f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, double, iXLen)

define void @test_f_sf_vc_fvv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvv_se_e64m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: sf.vc.fvv 1, v8, v10, fa0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f64.f64.iXLen(iXLen 1, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv2f64.f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, double, iXLen)

define void @test_f_sf_vc_fvv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvv_se_e64m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: sf.vc.fvv 1, v8, v12, fa0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f64.f64.iXLen(iXLen 1, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv4f64.f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, double, iXLen)

define void @test_f_sf_vc_fvv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvv_se_e64m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: sf.vc.fvv 1, v8, v16, fa0
; CHECK-NEXT: ret
entry:
  tail call void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f64.f64.iXLen(iXLen 1, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fvv.se.iXLen.nxv8f64.f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, double, iXLen)

define <vscale x 1 x half> @test_f_sf_vc_v_fvv_se_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv1f16.iXLen.f16.iXLen(iXLen 1, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
  ret <vscale x 1 x half> %0
}

declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv1f16.iXLen.f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, half, iXLen)

define <vscale x 2 x half> @test_f_sf_vc_v_fvv_se_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv2f16.iXLen.f16.iXLen(iXLen 1, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
  ret <vscale x 2 x half> %0
}

declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv2f16.iXLen.f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, half, iXLen)

define <vscale x 4 x half> @test_f_sf_vc_v_fvv_se_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.iXLen.f16.iXLen(iXLen 1, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
  ret <vscale x 4 x half> %0
}

declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv4f16.iXLen.f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, half, iXLen)

define <vscale x 8 x half> @test_f_sf_vc_v_fvv_se_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv8f16.iXLen.f16.iXLen(iXLen 1, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
  ret <vscale x 8 x half> %0
}

declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv8f16.iXLen.f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, half, iXLen)

define <vscale x 16 x half> @test_f_sf_vc_v_fvv_se_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv16f16.iXLen.f16.iXLen(iXLen 1, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
  ret <vscale x 16 x half> %0
}

declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv16f16.iXLen.f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, half, iXLen)

define <vscale x 32 x half> @test_f_sf_vc_v_fvv_se_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e16m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv32f16.iXLen.f16.iXLen(iXLen 1, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl)
  ret <vscale x 32 x half> %0
}

declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.fvv.se.nxv32f16.iXLen.f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, half, iXLen)

define <vscale x 1 x float> @test_f_sf_vc_v_fvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e32mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv1f32.iXLen.f32.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
  ret <vscale x 1 x float> %0
}

declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv1f32.iXLen.f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, float, iXLen)

define <vscale x 2 x float> @test_f_sf_vc_v_fvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e32m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv2f32.iXLen.f32.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
  ret <vscale x 2 x float> %0
}

declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv2f32.iXLen.f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, float, iXLen)

define <vscale x 4 x float> @test_f_sf_vc_v_fvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e32m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.iXLen.f32.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
  ret <vscale x 4 x float> %0
}

declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv4f32.iXLen.f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, float, iXLen)

define <vscale x 8 x float> @test_f_sf_vc_v_fvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e32m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv8f32.iXLen.f32.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
  ret <vscale x 8 x float> %0
}

declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv8f32.iXLen.f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, float, iXLen)

define <vscale x 16 x float> @test_f_sf_vc_v_fvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e32m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv16f32.iXLen.f32.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl)
  ret <vscale x 16 x float> %0
}

declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvv.se.nxv16f32.iXLen.f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, float, iXLen)

define <vscale x 1 x double> @test_f_sf_vc_v_fvv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e64m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv1f64.iXLen.f64.iXLen(iXLen 1, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl)
  ret <vscale x 1 x double> %0
}

declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv1f64.iXLen.f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, double, iXLen)

define <vscale x 2 x double> @test_f_sf_vc_v_fvv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e64m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv2f64.iXLen.f64.iXLen(iXLen 1, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl)
  ret <vscale x 2 x double> %0
}

declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv2f64.iXLen.f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, double, iXLen)

define <vscale x 4 x double> @test_f_sf_vc_v_fvv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e64m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv4f64.iXLen.f64.iXLen(iXLen 1, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl)
  ret <vscale x 4 x double> %0
}

declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv4f64.iXLen.f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, double, iXLen)

define <vscale x 8 x double> @test_f_sf_vc_v_fvv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_se_e64m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv8f64.iXLen.f64.iXLen(iXLen 1, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl)
  ret <vscale x 8 x double> %0
}

declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvv.se.nxv8f64.iXLen.f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, double, iXLen)

define <vscale x 1 x half> @test_f_sf_vc_v_fvv_e16mf4(<vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_e16mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fvv.nxv1f16.iXLen.f16.iXLen(iXLen 1, <vscale x 1 x half> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
  ret <vscale x 1 x half> %0
}

declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.fvv.nxv1f16.iXLen.f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, half, iXLen)

define <vscale x 2 x half> @test_f_sf_vc_v_fvv_e16mf2(<vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_e16mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fvv.nxv2f16.iXLen.f16.iXLen(iXLen 1, <vscale x 2 x half> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
  ret <vscale x 2 x half> %0
}

declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.fvv.nxv2f16.iXLen.f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, half, iXLen)

define <vscale x 4 x half> @test_f_sf_vc_v_fvv_e16m1(<vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_e16m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fvv.nxv4f16.iXLen.f16.iXLen(iXLen 1, <vscale x 4 x half> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
  ret <vscale x 4 x half> %0
}

declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.fvv.nxv4f16.iXLen.f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, half, iXLen)

define <vscale x 8 x half> @test_f_sf_vc_v_fvv_e16m2(<vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_e16m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fvv.nxv8f16.iXLen.f16.iXLen(iXLen 1, <vscale x 8 x half> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
  ret <vscale x 8 x half> %0
}

declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.fvv.nxv8f16.iXLen.f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, half, iXLen)

define <vscale x 16 x half> @test_f_sf_vc_v_fvv_e16m4(<vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_e16m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fvv.nxv16f16.iXLen.f16.iXLen(iXLen 1, <vscale x 16 x half> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
  ret <vscale x 16 x half> %0
}

declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.fvv.nxv16f16.iXLen.f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, half, iXLen)

define <vscale x 32 x half> @test_f_sf_vc_v_fvv_e16m8(<vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_e16m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fvv.nxv32f16.iXLen.f16.iXLen(iXLen 1, <vscale x 32 x half> %vd, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl)
  ret <vscale x 32 x half> %0
}

declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.fvv.nxv32f16.iXLen.f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, half, iXLen)

define <vscale x 1 x float> @test_f_sf_vc_v_fvv_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_e32mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvv.nxv1f32.iXLen.f32.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
  ret <vscale x 1 x float> %0
}

declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvv.nxv1f32.iXLen.f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, float, iXLen)

define <vscale x 2 x float> @test_f_sf_vc_v_fvv_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_e32m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvv.nxv2f32.iXLen.f32.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
  ret <vscale x 2 x float> %0
}

declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvv.nxv2f32.iXLen.f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, float, iXLen)

define <vscale x 4 x float> @test_f_sf_vc_v_fvv_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_e32m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvv.nxv4f32.iXLen.f32.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
  ret <vscale x 4 x float> %0
}

declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvv.nxv4f32.iXLen.f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, float, iXLen)

define <vscale x 8 x float> @test_f_sf_vc_v_fvv_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_e32m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvv.nxv8f32.iXLen.f32.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
  ret <vscale x 8 x float> %0
}

declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvv.nxv8f32.iXLen.f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, float, iXLen)

define <vscale x 16 x float> @test_f_sf_vc_v_fvv_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_e32m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvv.nxv16f32.iXLen.f32.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl)
  ret <vscale x 16 x float> %0
}

declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvv.nxv16f32.iXLen.f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, float, iXLen)

define <vscale x 1 x double> @test_f_sf_vc_v_fvv_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_e64m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v9, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvv.nxv1f64.iXLen.f64.iXLen(iXLen 1, <vscale x 1 x double> %vd, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl)
  ret <vscale x 1 x double> %0
}

declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvv.nxv1f64.iXLen.f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, double, iXLen)

define <vscale x 2 x double> @test_f_sf_vc_v_fvv_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_e64m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v10, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvv.nxv2f64.iXLen.f64.iXLen(iXLen 1, <vscale x 2 x double> %vd, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl)
  ret <vscale x 2 x double> %0
}

declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvv.nxv2f64.iXLen.f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, double, iXLen)

define <vscale x 4 x double> @test_f_sf_vc_v_fvv_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_e64m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v12, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvv.nxv4f64.iXLen.f64.iXLen(iXLen 1, <vscale x 4 x double> %vd, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl)
  ret <vscale x 4 x double> %0
}

declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvv.nxv4f64.iXLen.f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, double, iXLen)

define <vscale x 8 x double> @test_f_sf_vc_v_fvv_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvv_e64m8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: sf.vc.v.fvv 1, v8, v16, fa0
; CHECK-NEXT: ret
entry:
  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvv.nxv8f64.iXLen.f64.iXLen(iXLen 1, <vscale x 8 x double> %vd, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl)
  ret <vscale x 8 x double> %0
}

declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvv.nxv8f64.iXLen.f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, double, iXLen)