1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+xsfvcp \
3 ; RUN: -verify-machineinstrs | FileCheck %s
4 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+xsfvcp \
5 ; RUN: -verify-machineinstrs | FileCheck %s
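; This file tests lowering of the SiFive xsfvcp (sf.vc.*) custom-instruction
; intrinsics in their vector-vector (.vv) and scalar-vector (.xv) forms across
; a range of SEW/LMUL combinations. Each case expects a
; `vsetvli zero, aN, eSEW, LMUL, ta, ma` followed by the corresponding
; `sf.vc.*` instruction carrying the immediates from the intrinsic call.
; The iXLen placeholder is rewritten by the sed pipelines in the RUN lines;
; for example, the RV64 run sees the first call below as:
;   tail call void @llvm.riscv.sf.vc.vv.se.i64.nxv1i8.nxv1i8.i64(i64 3, i64 31, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, i64 %vl)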
7 define void @test_sf_vc_vv_se_e8mf8(<vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
8 ; CHECK-LABEL: test_sf_vc_vv_se_e8mf8:
9 ; CHECK: # %bb.0: # %entry
10 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
11 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
14 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
18 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
20 define void @test_sf_vc_vv_se_e8mf4(<vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
21 ; CHECK-LABEL: test_sf_vc_vv_se_e8mf4:
22 ; CHECK: # %bb.0: # %entry
23 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
24 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
27 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
31 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
33 define void @test_sf_vc_vv_se_e8mf2(<vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
34 ; CHECK-LABEL: test_sf_vc_vv_se_e8mf2:
35 ; CHECK: # %bb.0: # %entry
36 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
37 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
40 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
44 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
46 define void @test_sf_vc_vv_se_e8m1(<vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
47 ; CHECK-LABEL: test_sf_vc_vv_se_e8m1:
48 ; CHECK: # %bb.0: # %entry
49 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
50 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
53 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
57 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
59 define void @test_sf_vc_vv_se_e8m2(<vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
60 ; CHECK-LABEL: test_sf_vc_vv_se_e8m2:
61 ; CHECK: # %bb.0: # %entry
62 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
63 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10
66 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen 3, iXLen 31, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
70 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
72 define void @test_sf_vc_vv_se_e8m4(<vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
73 ; CHECK-LABEL: test_sf_vc_vv_se_e8m4:
74 ; CHECK: # %bb.0: # %entry
75 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
76 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12
79 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen 3, iXLen 31, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
83 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
85 define void @test_sf_vc_vv_se_e8m8(<vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
86 ; CHECK-LABEL: test_sf_vc_vv_se_e8m8:
87 ; CHECK: # %bb.0: # %entry
88 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
89 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v16
92 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen 3, iXLen 31, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
96 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv64i8.nxv64i8.iXLen(iXLen, iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
98 define void @test_sf_vc_vv_se_e16mf4(<vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
99 ; CHECK-LABEL: test_sf_vc_vv_se_e16mf4:
100 ; CHECK: # %bb.0: # %entry
101 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
102 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
105 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
109 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
111 define void @test_sf_vc_vv_se_e16mf2(<vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
112 ; CHECK-LABEL: test_sf_vc_vv_se_e16mf2:
113 ; CHECK: # %bb.0: # %entry
114 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
115 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
118 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
122 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
124 define void @test_sf_vc_vv_se_e16m1(<vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
125 ; CHECK-LABEL: test_sf_vc_vv_se_e16m1:
126 ; CHECK: # %bb.0: # %entry
127 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
128 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
131 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
135 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
137 define void @test_sf_vc_vv_se_e16m2(<vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
138 ; CHECK-LABEL: test_sf_vc_vv_se_e16m2:
139 ; CHECK: # %bb.0: # %entry
140 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
141 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10
144 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
148 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
150 define void @test_sf_vc_vv_se_e16m4(<vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
151 ; CHECK-LABEL: test_sf_vc_vv_se_e16m4:
152 ; CHECK: # %bb.0: # %entry
153 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
154 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12
157 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen 3, iXLen 31, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
161 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
163 define void @test_sf_vc_vv_se_e16m8(<vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
164 ; CHECK-LABEL: test_sf_vc_vv_se_e16m8:
165 ; CHECK: # %bb.0: # %entry
166 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
167 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v16
170 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen 3, iXLen 31, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
174 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32i16.nxv32i16.iXLen(iXLen, iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
176 define void @test_sf_vc_vv_se_e32mf2(<vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
177 ; CHECK-LABEL: test_sf_vc_vv_se_e32mf2:
178 ; CHECK: # %bb.0: # %entry
179 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
180 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
183 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
187 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
189 define void @test_sf_vc_vv_se_e32m1(<vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
190 ; CHECK-LABEL: test_sf_vc_vv_se_e32m1:
191 ; CHECK: # %bb.0: # %entry
192 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
193 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
196 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
200 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
202 define void @test_sf_vc_vv_se_e32m2(<vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
203 ; CHECK-LABEL: test_sf_vc_vv_se_e32m2:
204 ; CHECK: # %bb.0: # %entry
205 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
206 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10
209 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
213 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
215 define void @test_sf_vc_vv_se_e32m4(<vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
216 ; CHECK-LABEL: test_sf_vc_vv_se_e32m4:
217 ; CHECK: # %bb.0: # %entry
218 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
219 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12
222 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
226 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
228 define void @test_sf_vc_vv_se_e32m8(<vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
229 ; CHECK-LABEL: test_sf_vc_vv_se_e32m8:
230 ; CHECK: # %bb.0: # %entry
231 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
232 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v16
235 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen 3, iXLen 31, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
239 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16i32.nxv16i32.iXLen(iXLen, iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
241 define void @test_sf_vc_vv_se_e64m1(<vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
242 ; CHECK-LABEL: test_sf_vc_vv_se_e64m1:
243 ; CHECK: # %bb.0: # %entry
244 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
245 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
248 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
252 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1i64.nxv1i64.iXLen(iXLen, iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
254 define void @test_sf_vc_vv_se_e64m2(<vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
255 ; CHECK-LABEL: test_sf_vc_vv_se_e64m2:
256 ; CHECK: # %bb.0: # %entry
257 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
258 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10
261 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
265 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2i64.nxv2i64.iXLen(iXLen, iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
267 define void @test_sf_vc_vv_se_e64m4(<vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
268 ; CHECK-LABEL: test_sf_vc_vv_se_e64m4:
269 ; CHECK: # %bb.0: # %entry
270 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
271 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12
274 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
278 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4i64.nxv4i64.iXLen(iXLen, iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
280 define void @test_sf_vc_vv_se_e64m8(<vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
281 ; CHECK-LABEL: test_sf_vc_vv_se_e64m8:
282 ; CHECK: # %bb.0: # %entry
283 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
284 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v16
287 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
291 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8i64.nxv8i64.iXLen(iXLen, iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
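; sf.vc.v.vv.se: the vector-vector form that returns a result. The checks
; expect the destination vector register in place of the second immediate (31)
; used by the void sf.vc.vv.se tests above.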
293 define <vscale x 1 x i8> @test_sf_vc_v_vv_se_e8mf8(<vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
294 ; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf8:
295 ; CHECK: # %bb.0: # %entry
296 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
297 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
300 %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
301 ret <vscale x 1 x i8> %0
304 declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
306 define <vscale x 2 x i8> @test_sf_vc_v_vv_se_e8mf4(<vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
307 ; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf4:
308 ; CHECK: # %bb.0: # %entry
309 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
310 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
313 %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
314 ret <vscale x 2 x i8> %0
317 declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
319 define <vscale x 4 x i8> @test_sf_vc_v_vv_se_e8mf2(<vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
320 ; CHECK-LABEL: test_sf_vc_v_vv_se_e8mf2:
321 ; CHECK: # %bb.0: # %entry
322 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
323 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
326 %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
327 ret <vscale x 4 x i8> %0
330 declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
332 define <vscale x 8 x i8> @test_sf_vc_v_vv_se_e8m1(<vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
333 ; CHECK-LABEL: test_sf_vc_v_vv_se_e8m1:
334 ; CHECK: # %bb.0: # %entry
335 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
336 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
339 %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
340 ret <vscale x 8 x i8> %0
343 declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
345 define <vscale x 16 x i8> @test_sf_vc_v_vv_se_e8m2(<vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
346 ; CHECK-LABEL: test_sf_vc_v_vv_se_e8m2:
347 ; CHECK: # %bb.0: # %entry
348 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
349 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
352 %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
353 ret <vscale x 16 x i8> %0
356 declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
358 define <vscale x 32 x i8> @test_sf_vc_v_vv_se_e8m4(<vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
359 ; CHECK-LABEL: test_sf_vc_v_vv_se_e8m4:
360 ; CHECK: # %bb.0: # %entry
361 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
362 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
365 %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
366 ret <vscale x 32 x i8> %0
369 declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
371 define <vscale x 64 x i8> @test_sf_vc_v_vv_se_e8m8(<vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
372 ; CHECK-LABEL: test_sf_vc_v_vv_se_e8m8:
373 ; CHECK: # %bb.0: # %entry
374 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
375 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16
378 %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen 3, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
379 ret <vscale x 64 x i8> %0
382 declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.se.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
384 define <vscale x 1 x i16> @test_sf_vc_v_vv_se_e16mf4(<vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
385 ; CHECK-LABEL: test_sf_vc_v_vv_se_e16mf4:
386 ; CHECK: # %bb.0: # %entry
387 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
388 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
391 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
392 ret <vscale x 1 x i16> %0
395 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
397 define <vscale x 2 x i16> @test_sf_vc_v_vv_se_e16mf2(<vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
398 ; CHECK-LABEL: test_sf_vc_v_vv_se_e16mf2:
399 ; CHECK: # %bb.0: # %entry
400 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
401 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
404 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
405 ret <vscale x 2 x i16> %0
408 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
410 define <vscale x 4 x i16> @test_sf_vc_v_vv_se_e16m1(<vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
411 ; CHECK-LABEL: test_sf_vc_v_vv_se_e16m1:
412 ; CHECK: # %bb.0: # %entry
413 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
414 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
417 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
418 ret <vscale x 4 x i16> %0
421 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
423 define <vscale x 8 x i16> @test_sf_vc_v_vv_se_e16m2(<vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
424 ; CHECK-LABEL: test_sf_vc_v_vv_se_e16m2:
425 ; CHECK: # %bb.0: # %entry
426 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
427 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
430 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
431 ret <vscale x 8 x i16> %0
434 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
436 define <vscale x 16 x i16> @test_sf_vc_v_vv_se_e16m4(<vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
437 ; CHECK-LABEL: test_sf_vc_v_vv_se_e16m4:
438 ; CHECK: # %bb.0: # %entry
439 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
440 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
443 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
444 ret <vscale x 16 x i16> %0
447 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
449 define <vscale x 32 x i16> @test_sf_vc_v_vv_se_e16m8(<vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
450 ; CHECK-LABEL: test_sf_vc_v_vv_se_e16m8:
451 ; CHECK: # %bb.0: # %entry
452 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
453 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16
456 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen 3, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
457 ret <vscale x 32 x i16> %0
460 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.se.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
462 define <vscale x 1 x i32> @test_sf_vc_v_vv_se_e32mf2(<vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
463 ; CHECK-LABEL: test_sf_vc_v_vv_se_e32mf2:
464 ; CHECK: # %bb.0: # %entry
465 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
466 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
469 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
470 ret <vscale x 1 x i32> %0
473 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
475 define <vscale x 2 x i32> @test_sf_vc_v_vv_se_e32m1(<vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
476 ; CHECK-LABEL: test_sf_vc_v_vv_se_e32m1:
477 ; CHECK: # %bb.0: # %entry
478 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
479 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
482 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
483 ret <vscale x 2 x i32> %0
486 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
488 define <vscale x 4 x i32> @test_sf_vc_v_vv_se_e32m2(<vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
489 ; CHECK-LABEL: test_sf_vc_v_vv_se_e32m2:
490 ; CHECK: # %bb.0: # %entry
491 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
492 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
495 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
496 ret <vscale x 4 x i32> %0
499 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
501 define <vscale x 8 x i32> @test_sf_vc_v_vv_se_e32m4(<vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
502 ; CHECK-LABEL: test_sf_vc_v_vv_se_e32m4:
503 ; CHECK: # %bb.0: # %entry
504 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
505 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
508 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
509 ret <vscale x 8 x i32> %0
512 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
514 define <vscale x 16 x i32> @test_sf_vc_v_vv_se_e32m8(<vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
515 ; CHECK-LABEL: test_sf_vc_v_vv_se_e32m8:
516 ; CHECK: # %bb.0: # %entry
517 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
518 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16
521 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen 3, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
522 ret <vscale x 16 x i32> %0
525 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.se.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
527 define <vscale x 1 x i64> @test_sf_vc_v_vv_se_e64m1(<vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
528 ; CHECK-LABEL: test_sf_vc_v_vv_se_e64m1:
529 ; CHECK: # %bb.0: # %entry
530 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
531 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
534 %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen 3, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
535 ret <vscale x 1 x i64> %0
538 declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
540 define <vscale x 2 x i64> @test_sf_vc_v_vv_se_e64m2(<vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
541 ; CHECK-LABEL: test_sf_vc_v_vv_se_e64m2:
542 ; CHECK: # %bb.0: # %entry
543 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
544 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
547 %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen 3, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
548 ret <vscale x 2 x i64> %0
551 declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
553 define <vscale x 4 x i64> @test_sf_vc_v_vv_se_e64m4(<vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
554 ; CHECK-LABEL: test_sf_vc_v_vv_se_e64m4:
555 ; CHECK: # %bb.0: # %entry
556 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
557 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
560 %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen 3, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
561 ret <vscale x 4 x i64> %0
564 declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
566 define <vscale x 8 x i64> @test_sf_vc_v_vv_se_e64m8(<vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
567 ; CHECK-LABEL: test_sf_vc_v_vv_se_e64m8:
568 ; CHECK: # %bb.0: # %entry
569 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
570 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16
573 %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen 3, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
574 ret <vscale x 8 x i64> %0
577 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.se.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
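; sf.vc.v.vv without the .se (side-effect) suffix; the expected lowering is
; identical to the .se variants above.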
579 define <vscale x 1 x i8> @test_sf_vc_v_vv_e8mf8(<vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
580 ; CHECK-LABEL: test_sf_vc_v_vv_e8mf8:
581 ; CHECK: # %bb.0: # %entry
582 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
583 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
586 %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
587 ret <vscale x 1 x i8> %0
590 declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.vv.nxv1i8.iXLen.nxv1i8.iXLen(iXLen, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)
592 define <vscale x 2 x i8> @test_sf_vc_v_vv_e8mf4(<vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
593 ; CHECK-LABEL: test_sf_vc_v_vv_e8mf4:
594 ; CHECK: # %bb.0: # %entry
595 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
596 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
599 %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
600 ret <vscale x 2 x i8> %0
603 declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.vv.nxv2i8.iXLen.nxv2i8.iXLen(iXLen, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)
605 define <vscale x 4 x i8> @test_sf_vc_v_vv_e8mf2(<vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
606 ; CHECK-LABEL: test_sf_vc_v_vv_e8mf2:
607 ; CHECK: # %bb.0: # %entry
608 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
609 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
612 %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
613 ret <vscale x 4 x i8> %0
616 declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.vv.nxv4i8.iXLen.nxv4i8.iXLen(iXLen, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)
618 define <vscale x 8 x i8> @test_sf_vc_v_vv_e8m1(<vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
619 ; CHECK-LABEL: test_sf_vc_v_vv_e8m1:
620 ; CHECK: # %bb.0: # %entry
621 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
622 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
625 %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
626 ret <vscale x 8 x i8> %0
629 declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.vv.nxv8i8.iXLen.nxv8i8.iXLen(iXLen, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)
631 define <vscale x 16 x i8> @test_sf_vc_v_vv_e8m2(<vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
632 ; CHECK-LABEL: test_sf_vc_v_vv_e8m2:
633 ; CHECK: # %bb.0: # %entry
634 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
635 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
638 %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
639 ret <vscale x 16 x i8> %0
642 declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.vv.nxv16i8.iXLen.nxv16i8.iXLen(iXLen, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)
644 define <vscale x 32 x i8> @test_sf_vc_v_vv_e8m4(<vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
645 ; CHECK-LABEL: test_sf_vc_v_vv_e8m4:
646 ; CHECK: # %bb.0: # %entry
647 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
648 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
651 %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
652 ret <vscale x 32 x i8> %0
655 declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.vv.nxv32i8.iXLen.nxv32i8.iXLen(iXLen, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)
657 define <vscale x 64 x i8> @test_sf_vc_v_vv_e8m8(<vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl) {
658 ; CHECK-LABEL: test_sf_vc_v_vv_e8m8:
659 ; CHECK: # %bb.0: # %entry
660 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
661 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16
664 %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen 3, <vscale x 64 x i8> %vs2, <vscale x 64 x i8> %vs1, iXLen %vl)
665 ret <vscale x 64 x i8> %0
668 declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.vv.nxv64i8.iXLen.nxv64i8.iXLen(iXLen, <vscale x 64 x i8>, <vscale x 64 x i8>, iXLen)
670 define <vscale x 1 x i16> @test_sf_vc_v_vv_e16mf4(<vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
671 ; CHECK-LABEL: test_sf_vc_v_vv_e16mf4:
672 ; CHECK: # %bb.0: # %entry
673 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
674 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
677 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
678 ret <vscale x 1 x i16> %0
681 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vv.nxv1i16.iXLen.nxv1i16.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
683 define <vscale x 2 x i16> @test_sf_vc_v_vv_e16mf2(<vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
684 ; CHECK-LABEL: test_sf_vc_v_vv_e16mf2:
685 ; CHECK: # %bb.0: # %entry
686 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
687 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
690 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
691 ret <vscale x 2 x i16> %0
694 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vv.nxv2i16.iXLen.nxv2i16.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
696 define <vscale x 4 x i16> @test_sf_vc_v_vv_e16m1(<vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
697 ; CHECK-LABEL: test_sf_vc_v_vv_e16m1:
698 ; CHECK: # %bb.0: # %entry
699 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
700 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
703 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
704 ret <vscale x 4 x i16> %0
707 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vv.nxv4i16.iXLen.nxv4i16.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
709 define <vscale x 8 x i16> @test_sf_vc_v_vv_e16m2(<vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
710 ; CHECK-LABEL: test_sf_vc_v_vv_e16m2:
711 ; CHECK: # %bb.0: # %entry
712 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
713 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
716 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
717 ret <vscale x 8 x i16> %0
720 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vv.nxv8i16.iXLen.nxv8i16.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
722 define <vscale x 16 x i16> @test_sf_vc_v_vv_e16m4(<vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
723 ; CHECK-LABEL: test_sf_vc_v_vv_e16m4:
724 ; CHECK: # %bb.0: # %entry
725 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
726 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
729 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
730 ret <vscale x 16 x i16> %0
733 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vv.nxv16i16.iXLen.nxv16i16.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
735 define <vscale x 32 x i16> @test_sf_vc_v_vv_e16m8(<vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl) {
736 ; CHECK-LABEL: test_sf_vc_v_vv_e16m8:
737 ; CHECK: # %bb.0: # %entry
738 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
739 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16
742 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen 3, <vscale x 32 x i16> %vs2, <vscale x 32 x i16> %vs1, iXLen %vl)
743 ret <vscale x 32 x i16> %0
746 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vv.nxv32i16.iXLen.nxv32i16.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i16>, iXLen)
748 define <vscale x 1 x i32> @test_sf_vc_v_vv_e32mf2(<vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
749 ; CHECK-LABEL: test_sf_vc_v_vv_e32mf2:
750 ; CHECK: # %bb.0: # %entry
751 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
752 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
755 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
756 ret <vscale x 1 x i32> %0
759 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vv.nxv1i32.iXLen.nxv1i32.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)
761 define <vscale x 2 x i32> @test_sf_vc_v_vv_e32m1(<vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
762 ; CHECK-LABEL: test_sf_vc_v_vv_e32m1:
763 ; CHECK: # %bb.0: # %entry
764 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
765 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
768 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
769 ret <vscale x 2 x i32> %0
772 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vv.nxv2i32.iXLen.nxv2i32.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)
774 define <vscale x 4 x i32> @test_sf_vc_v_vv_e32m2(<vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
775 ; CHECK-LABEL: test_sf_vc_v_vv_e32m2:
776 ; CHECK: # %bb.0: # %entry
777 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
778 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
781 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
782 ret <vscale x 4 x i32> %0
785 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vv.nxv4i32.iXLen.nxv4i32.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)
787 define <vscale x 8 x i32> @test_sf_vc_v_vv_e32m4(<vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
788 ; CHECK-LABEL: test_sf_vc_v_vv_e32m4:
789 ; CHECK: # %bb.0: # %entry
790 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
791 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
794 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
795 ret <vscale x 8 x i32> %0
798 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vv.nxv8i32.iXLen.nxv8i32.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
800 define <vscale x 16 x i32> @test_sf_vc_v_vv_e32m8(<vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl) {
801 ; CHECK-LABEL: test_sf_vc_v_vv_e32m8:
802 ; CHECK: # %bb.0: # %entry
803 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
804 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16
807 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen 3, <vscale x 16 x i32> %vs2, <vscale x 16 x i32> %vs1, iXLen %vl)
808 ret <vscale x 16 x i32> %0
811 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vv.nxv16i32.iXLen.nxv16i32.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i32>, iXLen)
813 define <vscale x 1 x i64> @test_sf_vc_v_vv_e64m1(<vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl) {
814 ; CHECK-LABEL: test_sf_vc_v_vv_e64m1:
815 ; CHECK: # %bb.0: # %entry
816 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
817 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
820 %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen 3, <vscale x 1 x i64> %vs2, <vscale x 1 x i64> %vs1, iXLen %vl)
821 ret <vscale x 1 x i64> %0
824 declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vv.nxv1i64.iXLen.nxv1i64.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i64>, iXLen)
826 define <vscale x 2 x i64> @test_sf_vc_v_vv_e64m2(<vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl) {
827 ; CHECK-LABEL: test_sf_vc_v_vv_e64m2:
828 ; CHECK: # %bb.0: # %entry
829 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
830 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
833 %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen 3, <vscale x 2 x i64> %vs2, <vscale x 2 x i64> %vs1, iXLen %vl)
834 ret <vscale x 2 x i64> %0
837 declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vv.nxv2i64.iXLen.nxv2i64.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i64>, iXLen)
839 define <vscale x 4 x i64> @test_sf_vc_v_vv_e64m4(<vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl) {
840 ; CHECK-LABEL: test_sf_vc_v_vv_e64m4:
841 ; CHECK: # %bb.0: # %entry
842 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
843 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
846 %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen 3, <vscale x 4 x i64> %vs2, <vscale x 4 x i64> %vs1, iXLen %vl)
847 ret <vscale x 4 x i64> %0
850 declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vv.nxv4i64.iXLen.nxv4i64.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i64>, iXLen)
852 define <vscale x 8 x i64> @test_sf_vc_v_vv_e64m8(<vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl) {
853 ; CHECK-LABEL: test_sf_vc_v_vv_e64m8:
854 ; CHECK: # %bb.0: # %entry
855 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
856 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16
859 %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen 3, <vscale x 8 x i64> %vs2, <vscale x 8 x i64> %vs1, iXLen %vl)
860 ret <vscale x 8 x i64> %0
863 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vv.nxv8i64.iXLen.nxv8i64.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i64>, iXLen)
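; sf.vc.xv.se: scalar GPR operand instead of a second vector. The scalar is
; passed in a0, so these tests take their VL from a1.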
865 define void @test_sf_vc_xv_se_e8mf8(<vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
866 ; CHECK-LABEL: test_sf_vc_xv_se_e8mf8:
867 ; CHECK: # %bb.0: # %entry
868 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
869 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
872 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i8.i8.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
876 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i8.i8.iXLen(iXLen, iXLen, <vscale x 1 x i8>, i8, iXLen)
878 define void @test_sf_vc_xv_se_e8mf4(<vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
879 ; CHECK-LABEL: test_sf_vc_xv_se_e8mf4:
880 ; CHECK: # %bb.0: # %entry
881 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
882 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
885 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i8.i8.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
889 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i8.i8.iXLen(iXLen, iXLen, <vscale x 2 x i8>, i8, iXLen)
891 define void @test_sf_vc_xv_se_e8mf2(<vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
892 ; CHECK-LABEL: test_sf_vc_xv_se_e8mf2:
893 ; CHECK: # %bb.0: # %entry
894 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
895 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
898 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i8.i8.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
902 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i8.i8.iXLen(iXLen, iXLen, <vscale x 4 x i8>, i8, iXLen)
904 define void @test_sf_vc_xv_se_e8m1(<vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
905 ; CHECK-LABEL: test_sf_vc_xv_se_e8m1:
906 ; CHECK: # %bb.0: # %entry
907 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
908 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
911 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i8.i8.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
915 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i8.i8.iXLen(iXLen, iXLen, <vscale x 8 x i8>, i8, iXLen)
917 define void @test_sf_vc_xv_se_e8m2(<vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
918 ; CHECK-LABEL: test_sf_vc_xv_se_e8m2:
919 ; CHECK: # %bb.0: # %entry
920 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
921 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
924 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i8.i8.iXLen(iXLen 3, iXLen 31, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
928 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i8.i8.iXLen(iXLen, iXLen, <vscale x 16 x i8>, i8, iXLen)
930 define void @test_sf_vc_xv_se_e8m4(<vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
931 ; CHECK-LABEL: test_sf_vc_xv_se_e8m4:
932 ; CHECK: # %bb.0: # %entry
933 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
934 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
937 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i8.i8.iXLen(iXLen 3, iXLen 31, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
941 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i8.i8.iXLen(iXLen, iXLen, <vscale x 32 x i8>, i8, iXLen)
943 define void @test_sf_vc_xv_se_e8m8(<vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
944 ; CHECK-LABEL: test_sf_vc_xv_se_e8m8:
945 ; CHECK: # %bb.0: # %entry
946 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
947 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
950 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv64i8.i8.iXLen(iXLen 3, iXLen 31, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
954 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv64i8.i8.iXLen(iXLen, iXLen, <vscale x 64 x i8>, i8, iXLen)
956 define void @test_sf_vc_xv_se_e16mf4(<vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
957 ; CHECK-LABEL: test_sf_vc_xv_se_e16mf4:
958 ; CHECK: # %bb.0: # %entry
959 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
960 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
963 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
967 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i16.i16.iXLen(iXLen, iXLen, <vscale x 1 x i16>, i16, iXLen)
969 define void @test_sf_vc_xv_se_e16mf2(<vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
970 ; CHECK-LABEL: test_sf_vc_xv_se_e16mf2:
971 ; CHECK: # %bb.0: # %entry
972 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
973 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
976 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
980 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i16.i16.iXLen(iXLen, iXLen, <vscale x 2 x i16>, i16, iXLen)
982 define void @test_sf_vc_xv_se_e16m1(<vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
983 ; CHECK-LABEL: test_sf_vc_xv_se_e16m1:
984 ; CHECK: # %bb.0: # %entry
985 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
986 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
989 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
993 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i16.i16.iXLen(iXLen, iXLen, <vscale x 4 x i16>, i16, iXLen)
995 define void @test_sf_vc_xv_se_e16m2(<vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
996 ; CHECK-LABEL: test_sf_vc_xv_se_e16m2:
997 ; CHECK: # %bb.0: # %entry
998 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
999 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
1002 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
1006 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i16.i16.iXLen(iXLen, iXLen, <vscale x 8 x i16>, i16, iXLen)
1008 define void @test_sf_vc_xv_se_e16m4(<vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1009 ; CHECK-LABEL: test_sf_vc_xv_se_e16m4:
1010 ; CHECK: # %bb.0: # %entry
1011 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
1012 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
1015 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
1019 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i16.i16.iXLen(iXLen, iXLen, <vscale x 16 x i16>, i16, iXLen)
1021 define void @test_sf_vc_xv_se_e16m8(<vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1022 ; CHECK-LABEL: test_sf_vc_xv_se_e16m8:
1023 ; CHECK: # %bb.0: # %entry
1024 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
1025 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
1028 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
1032 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32i16.i16.iXLen(iXLen, iXLen, <vscale x 32 x i16>, i16, iXLen)
1034 define void @test_sf_vc_xv_se_e32mf2(<vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1035 ; CHECK-LABEL: test_sf_vc_xv_se_e32mf2:
1036 ; CHECK: # %bb.0: # %entry
1037 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1038 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
1041 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
1045 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1i32.i32.iXLen(iXLen, iXLen, <vscale x 1 x i32>, i32, iXLen)
1047 define void @test_sf_vc_xv_se_e32m1(<vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1048 ; CHECK-LABEL: test_sf_vc_xv_se_e32m1:
1049 ; CHECK: # %bb.0: # %entry
1050 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1051 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
1054 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
1058 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2i32.i32.iXLen(iXLen, iXLen, <vscale x 2 x i32>, i32, iXLen)
1060 define void @test_sf_vc_xv_se_e32m2(<vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1061 ; CHECK-LABEL: test_sf_vc_xv_se_e32m2:
1062 ; CHECK: # %bb.0: # %entry
1063 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1064 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
1067 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
1071 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4i32.i32.iXLen(iXLen, iXLen, <vscale x 4 x i32>, i32, iXLen)
1073 define void @test_sf_vc_xv_se_e32m4(<vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1074 ; CHECK-LABEL: test_sf_vc_xv_se_e32m4:
1075 ; CHECK: # %bb.0: # %entry
1076 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1077 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
1080 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
1084 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8i32.i32.iXLen(iXLen, iXLen, <vscale x 8 x i32>, i32, iXLen)
1086 define void @test_sf_vc_xv_se_e32m8(<vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1087 ; CHECK-LABEL: test_sf_vc_xv_se_e32m8:
1088 ; CHECK: # %bb.0: # %entry
1089 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1090 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
1093 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
1097 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16i32.i32.iXLen(iXLen, iXLen, <vscale x 16 x i32>, i32, iXLen)
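; sf.vc.v.xv.se: scalar GPR operand with a vector result.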
1099 define <vscale x 1 x i8> @test_sf_vc_v_xv_se_e8mf8(<vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1100 ; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf8:
1101 ; CHECK: # %bb.0: # %entry
1102 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
1103 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1106 %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv1i8.iXLen.i8.iXLen(iXLen 3, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
1107 ret <vscale x 1 x i8> %0
1110 declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv1i8.iXLen.i8.iXLen(iXLen, <vscale x 1 x i8>, i8, iXLen)
1112 define <vscale x 2 x i8> @test_sf_vc_v_xv_se_e8mf4(<vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1113 ; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf4:
1114 ; CHECK: # %bb.0: # %entry
1115 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
1116 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1119 %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv2i8.iXLen.i8.iXLen(iXLen 3, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
1120 ret <vscale x 2 x i8> %0
1123 declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv2i8.iXLen.i8.iXLen(iXLen, <vscale x 2 x i8>, i8, iXLen)
1125 define <vscale x 4 x i8> @test_sf_vc_v_xv_se_e8mf2(<vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1126 ; CHECK-LABEL: test_sf_vc_v_xv_se_e8mf2:
1127 ; CHECK: # %bb.0: # %entry
1128 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1129 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1132 %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv4i8.iXLen.i8.iXLen(iXLen 3, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
1133 ret <vscale x 4 x i8> %0
1136 declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv4i8.iXLen.i8.iXLen(iXLen, <vscale x 4 x i8>, i8, iXLen)
1138 define <vscale x 8 x i8> @test_sf_vc_v_xv_se_e8m1(<vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1139 ; CHECK-LABEL: test_sf_vc_v_xv_se_e8m1:
1140 ; CHECK: # %bb.0: # %entry
1141 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1142 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1145 %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv8i8.iXLen.i8.iXLen(iXLen 3, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
1146 ret <vscale x 8 x i8> %0
1149 declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv8i8.iXLen.i8.iXLen(iXLen, <vscale x 8 x i8>, i8, iXLen)
1151 define <vscale x 16 x i8> @test_sf_vc_v_xv_se_e8m2(<vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1152 ; CHECK-LABEL: test_sf_vc_v_xv_se_e8m2:
1153 ; CHECK: # %bb.0: # %entry
1154 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1155 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1158 %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv16i8.iXLen.i8.iXLen(iXLen 3, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
1159 ret <vscale x 16 x i8> %0
1162 declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv16i8.iXLen.i8.iXLen(iXLen, <vscale x 16 x i8>, i8, iXLen)
1164 define <vscale x 32 x i8> @test_sf_vc_v_xv_se_e8m4(<vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1165 ; CHECK-LABEL: test_sf_vc_v_xv_se_e8m4:
1166 ; CHECK: # %bb.0: # %entry
1167 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
1168 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1171 %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv32i8.iXLen.i8.iXLen(iXLen 3, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
1172 ret <vscale x 32 x i8> %0
1175 declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv32i8.iXLen.i8.iXLen(iXLen, <vscale x 32 x i8>, i8, iXLen)
1177 define <vscale x 64 x i8> @test_sf_vc_v_xv_se_e8m8(<vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1178 ; CHECK-LABEL: test_sf_vc_v_xv_se_e8m8:
1179 ; CHECK: # %bb.0: # %entry
1180 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
1181 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1184 %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv64i8.iXLen.i8.iXLen(iXLen 3, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
1185 ret <vscale x 64 x i8> %0
1188 declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.se.nxv64i8.iXLen.i8.iXLen(iXLen, <vscale x 64 x i8>, i8, iXLen)
1190 define <vscale x 1 x i16> @test_sf_vc_v_xv_se_e16mf4(<vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1191 ; CHECK-LABEL: test_sf_vc_v_xv_se_e16mf4:
1192 ; CHECK: # %bb.0: # %entry
1193 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1194 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1197 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv1i16.iXLen.i16.iXLen(iXLen 3, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
1198 ret <vscale x 1 x i16> %0
1201 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv1i16.iXLen.i16.iXLen(iXLen, <vscale x 1 x i16>, i16, iXLen)
1203 define <vscale x 2 x i16> @test_sf_vc_v_xv_se_e16mf2(<vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1204 ; CHECK-LABEL: test_sf_vc_v_xv_se_e16mf2:
1205 ; CHECK: # %bb.0: # %entry
1206 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
1207 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1210 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv2i16.iXLen.i16.iXLen(iXLen 3, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
1211 ret <vscale x 2 x i16> %0
1214 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv2i16.iXLen.i16.iXLen(iXLen, <vscale x 2 x i16>, i16, iXLen)
1216 define <vscale x 4 x i16> @test_sf_vc_v_xv_se_e16m1(<vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1217 ; CHECK-LABEL: test_sf_vc_v_xv_se_e16m1:
1218 ; CHECK: # %bb.0: # %entry
1219 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1220 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1223 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv4i16.iXLen.i16.iXLen(iXLen 3, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
1224 ret <vscale x 4 x i16> %0
1227 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv4i16.iXLen.i16.iXLen(iXLen, <vscale x 4 x i16>, i16, iXLen)
1229 define <vscale x 8 x i16> @test_sf_vc_v_xv_se_e16m2(<vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1230 ; CHECK-LABEL: test_sf_vc_v_xv_se_e16m2:
1231 ; CHECK: # %bb.0: # %entry
1232 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1233 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1236 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv8i16.iXLen.i16.iXLen(iXLen 3, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
1237 ret <vscale x 8 x i16> %0
1240 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv8i16.iXLen.i16.iXLen(iXLen, <vscale x 8 x i16>, i16, iXLen)
1242 define <vscale x 16 x i16> @test_sf_vc_v_xv_se_e16m4(<vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1243 ; CHECK-LABEL: test_sf_vc_v_xv_se_e16m4:
1244 ; CHECK: # %bb.0: # %entry
1245 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
1246 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1249 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv16i16.iXLen.i16.iXLen(iXLen 3, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
1250 ret <vscale x 16 x i16> %0
1253 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv16i16.iXLen.i16.iXLen(iXLen, <vscale x 16 x i16>, i16, iXLen)
1255 define <vscale x 32 x i16> @test_sf_vc_v_xv_se_e16m8(<vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1256 ; CHECK-LABEL: test_sf_vc_v_xv_se_e16m8:
1257 ; CHECK: # %bb.0: # %entry
1258 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
1259 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1262 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv32i16.iXLen.i16.iXLen(iXLen 3, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
1263 ret <vscale x 32 x i16> %0
1266 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.se.nxv32i16.iXLen.i16.iXLen(iXLen, <vscale x 32 x i16>, i16, iXLen)
1268 define <vscale x 1 x i32> @test_sf_vc_v_xv_se_e32mf2(<vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1269 ; CHECK-LABEL: test_sf_vc_v_xv_se_e32mf2:
1270 ; CHECK: # %bb.0: # %entry
1271 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1272 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1275 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i32.i32.iXLen(iXLen 3, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
1276 ret <vscale x 1 x i32> %0
1279 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv1i32.i32.i32.iXLen(iXLen, <vscale x 1 x i32>, i32, iXLen)
1281 define <vscale x 2 x i32> @test_sf_vc_v_xv_se_e32m1(<vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1282 ; CHECK-LABEL: test_sf_vc_v_xv_se_e32m1:
1283 ; CHECK: # %bb.0: # %entry
1284 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1285 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1288 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i32.i32.iXLen(iXLen 3, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
1289 ret <vscale x 2 x i32> %0
1292 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv2i32.i32.i32.iXLen(iXLen, <vscale x 2 x i32>, i32, iXLen)
1294 define <vscale x 4 x i32> @test_sf_vc_v_xv_se_e32m2(<vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1295 ; CHECK-LABEL: test_sf_vc_v_xv_se_e32m2:
1296 ; CHECK: # %bb.0: # %entry
1297 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1298 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1301 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i32.i32.iXLen(iXLen 3, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
1302 ret <vscale x 4 x i32> %0
1305 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv4i32.i32.i32.iXLen(iXLen, <vscale x 4 x i32>, i32, iXLen)
1307 define <vscale x 8 x i32> @test_sf_vc_v_xv_se_e32m4(<vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1308 ; CHECK-LABEL: test_sf_vc_v_xv_se_e32m4:
1309 ; CHECK: # %bb.0: # %entry
1310 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1311 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1314 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i32.i32.iXLen(iXLen 3, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
1315 ret <vscale x 8 x i32> %0
1318 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv8i32.i32.i32.iXLen(iXLen, <vscale x 8 x i32>, i32, iXLen)
1320 define <vscale x 16 x i32> @test_sf_vc_v_xv_se_e32m8(<vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1321 ; CHECK-LABEL: test_sf_vc_v_xv_se_e32m8:
1322 ; CHECK: # %bb.0: # %entry
1323 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1324 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1327 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i32.i32.iXLen(iXLen 3, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
1328 ret <vscale x 16 x i32> %0
1331 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.se.nxv16i32.i32.i32.iXLen(iXLen, <vscale x 16 x i32>, i32, iXLen)
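; Tests for the @llvm.riscv.sf.vc.v.xv intrinsics without the .se suffix,
; covering e8 through e32 element types across the fractional and whole LMUL settings.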
define <vscale x 1 x i8> @test_sf_vc_v_xv_e8mf8(<vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_xv_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.nxv1i8.iXLen.i8.iXLen(iXLen 3, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret <vscale x 1 x i8> %0
}

declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.xv.nxv1i8.iXLen.i8.iXLen(iXLen, <vscale x 1 x i8>, i8, iXLen)
define <vscale x 2 x i8> @test_sf_vc_v_xv_e8mf4(<vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_xv_e8mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    sf.vc.v.xv 3, v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.nxv2i8.iXLen.i8.iXLen(iXLen 3, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret <vscale x 2 x i8> %0
}

declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.xv.nxv2i8.iXLen.i8.iXLen(iXLen, <vscale x 2 x i8>, i8, iXLen)
1359 define <vscale x 4 x i8> @test_sf_vc_v_xv_e8mf2(<vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1360 ; CHECK-LABEL: test_sf_vc_v_xv_e8mf2:
1361 ; CHECK: # %bb.0: # %entry
1362 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1363 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1366 %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.nxv4i8.iXLen.i8.iXLen(iXLen 3, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
1367 ret <vscale x 4 x i8> %0
1370 declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.xv.nxv4i8.iXLen.i8.iXLen(iXLen, <vscale x 4 x i8>, i8, iXLen)
1372 define <vscale x 8 x i8> @test_sf_vc_v_xv_e8m1(<vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1373 ; CHECK-LABEL: test_sf_vc_v_xv_e8m1:
1374 ; CHECK: # %bb.0: # %entry
1375 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1376 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1379 %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.nxv8i8.iXLen.i8.iXLen(iXLen 3, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
1380 ret <vscale x 8 x i8> %0
1383 declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.xv.nxv8i8.iXLen.i8.iXLen(iXLen, <vscale x 8 x i8>, i8, iXLen)
1385 define <vscale x 16 x i8> @test_sf_vc_v_xv_e8m2(<vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1386 ; CHECK-LABEL: test_sf_vc_v_xv_e8m2:
1387 ; CHECK: # %bb.0: # %entry
1388 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
1389 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1392 %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.nxv16i8.iXLen.i8.iXLen(iXLen 3, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
1393 ret <vscale x 16 x i8> %0
1396 declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.xv.nxv16i8.iXLen.i8.iXLen(iXLen, <vscale x 16 x i8>, i8, iXLen)
1398 define <vscale x 32 x i8> @test_sf_vc_v_xv_e8m4(<vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1399 ; CHECK-LABEL: test_sf_vc_v_xv_e8m4:
1400 ; CHECK: # %bb.0: # %entry
1401 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
1402 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1405 %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.nxv32i8.iXLen.i8.iXLen(iXLen 3, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
1406 ret <vscale x 32 x i8> %0
1409 declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.xv.nxv32i8.iXLen.i8.iXLen(iXLen, <vscale x 32 x i8>, i8, iXLen)
1411 define <vscale x 64 x i8> @test_sf_vc_v_xv_e8m8(<vscale x 64 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1412 ; CHECK-LABEL: test_sf_vc_v_xv_e8m8:
1413 ; CHECK: # %bb.0: # %entry
1414 ; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
1415 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1418 %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.nxv64i8.iXLen.i8.iXLen(iXLen 3, <vscale x 64 x i8> %vs2, i8 %rs1, iXLen %vl)
1419 ret <vscale x 64 x i8> %0
1422 declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.xv.nxv64i8.iXLen.i8.iXLen(iXLen, <vscale x 64 x i8>, i8, iXLen)
1424 define <vscale x 1 x i16> @test_sf_vc_v_xv_e16mf4(<vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1425 ; CHECK-LABEL: test_sf_vc_v_xv_e16mf4:
1426 ; CHECK: # %bb.0: # %entry
1427 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1428 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1431 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.nxv1i16.iXLen.i16.iXLen(iXLen 3, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
1432 ret <vscale x 1 x i16> %0
1435 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xv.nxv1i16.iXLen.i16.iXLen(iXLen, <vscale x 1 x i16>, i16, iXLen)
1437 define <vscale x 2 x i16> @test_sf_vc_v_xv_e16mf2(<vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1438 ; CHECK-LABEL: test_sf_vc_v_xv_e16mf2:
1439 ; CHECK: # %bb.0: # %entry
1440 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
1441 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1444 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.nxv2i16.iXLen.i16.iXLen(iXLen 3, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
1445 ret <vscale x 2 x i16> %0
1448 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xv.nxv2i16.iXLen.i16.iXLen(iXLen, <vscale x 2 x i16>, i16, iXLen)
1450 define <vscale x 4 x i16> @test_sf_vc_v_xv_e16m1(<vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1451 ; CHECK-LABEL: test_sf_vc_v_xv_e16m1:
1452 ; CHECK: # %bb.0: # %entry
1453 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
1454 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1457 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.nxv4i16.iXLen.i16.iXLen(iXLen 3, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
1458 ret <vscale x 4 x i16> %0
1461 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xv.nxv4i16.iXLen.i16.iXLen(iXLen, <vscale x 4 x i16>, i16, iXLen)
1463 define <vscale x 8 x i16> @test_sf_vc_v_xv_e16m2(<vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1464 ; CHECK-LABEL: test_sf_vc_v_xv_e16m2:
1465 ; CHECK: # %bb.0: # %entry
1466 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1467 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1470 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.nxv8i16.iXLen.i16.iXLen(iXLen 3, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
1471 ret <vscale x 8 x i16> %0
1474 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xv.nxv8i16.iXLen.i16.iXLen(iXLen, <vscale x 8 x i16>, i16, iXLen)
1476 define <vscale x 16 x i16> @test_sf_vc_v_xv_e16m4(<vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1477 ; CHECK-LABEL: test_sf_vc_v_xv_e16m4:
1478 ; CHECK: # %bb.0: # %entry
1479 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
1480 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1483 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.nxv16i16.iXLen.i16.iXLen(iXLen 3, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
1484 ret <vscale x 16 x i16> %0
1487 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xv.nxv16i16.iXLen.i16.iXLen(iXLen, <vscale x 16 x i16>, i16, iXLen)
1489 define <vscale x 32 x i16> @test_sf_vc_v_xv_e16m8(<vscale x 32 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1490 ; CHECK-LABEL: test_sf_vc_v_xv_e16m8:
1491 ; CHECK: # %bb.0: # %entry
1492 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
1493 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1496 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.nxv32i16.iXLen.i16.iXLen(iXLen 3, <vscale x 32 x i16> %vs2, i16 %rs1, iXLen %vl)
1497 ret <vscale x 32 x i16> %0
1500 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xv.nxv32i16.iXLen.i16.iXLen(iXLen, <vscale x 32 x i16>, i16, iXLen)
1502 define <vscale x 1 x i32> @test_sf_vc_v_xv_e32mf2(<vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1503 ; CHECK-LABEL: test_sf_vc_v_xv_e32mf2:
1504 ; CHECK: # %bb.0: # %entry
1505 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1506 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1509 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.nxv1i32.i32.i32.iXLen(iXLen 3, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
1510 ret <vscale x 1 x i32> %0
1513 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xv.nxv1i32.i32.i32.iXLen(iXLen, <vscale x 1 x i32>, i32, iXLen)
1515 define <vscale x 2 x i32> @test_sf_vc_v_xv_e32m1(<vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1516 ; CHECK-LABEL: test_sf_vc_v_xv_e32m1:
1517 ; CHECK: # %bb.0: # %entry
1518 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
1519 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1522 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.nxv2i32.i32.i32.iXLen(iXLen 3, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
1523 ret <vscale x 2 x i32> %0
1526 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xv.nxv2i32.i32.i32.iXLen(iXLen, <vscale x 2 x i32>, i32, iXLen)
1528 define <vscale x 4 x i32> @test_sf_vc_v_xv_e32m2(<vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1529 ; CHECK-LABEL: test_sf_vc_v_xv_e32m2:
1530 ; CHECK: # %bb.0: # %entry
1531 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
1532 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1535 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.nxv4i32.i32.i32.iXLen(iXLen 3, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
1536 ret <vscale x 4 x i32> %0
1539 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xv.nxv4i32.i32.i32.iXLen(iXLen, <vscale x 4 x i32>, i32, iXLen)
1541 define <vscale x 8 x i32> @test_sf_vc_v_xv_e32m4(<vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1542 ; CHECK-LABEL: test_sf_vc_v_xv_e32m4:
1543 ; CHECK: # %bb.0: # %entry
1544 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1545 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1548 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.nxv8i32.i32.i32.iXLen(iXLen 3, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
1549 ret <vscale x 8 x i32> %0
1552 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xv.nxv8i32.i32.i32.iXLen(iXLen, <vscale x 8 x i32>, i32, iXLen)
1554 define <vscale x 16 x i32> @test_sf_vc_v_xv_e32m8(<vscale x 16 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1555 ; CHECK-LABEL: test_sf_vc_v_xv_e32m8:
1556 ; CHECK: # %bb.0: # %entry
1557 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
1558 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
1561 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.nxv16i32.i32.i32.iXLen(iXLen 3, <vscale x 16 x i32> %vs2, i32 %rs1, iXLen %vl)
1562 ret <vscale x 16 x i32> %0
1565 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xv.nxv16i32.i32.i32.iXLen(iXLen, <vscale x 16 x i32>, i32, iXLen)
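; Tests for the @llvm.riscv.sf.vc.iv.se intrinsics: void calls passing the
; immediate operand 10, covering e8 through e64 element types.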
define void @test_sf_vc_iv_se_e8mf8(<vscale x 1 x i8> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_iv_se_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i8.iXLen.iXLen(iXLen, iXLen, <vscale x 1 x i8>, iXLen, iXLen)
define void @test_sf_vc_iv_se_e8mf4(<vscale x 2 x i8> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_iv_se_e8mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    sf.vc.iv 3, 31, v8, 10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i8.iXLen.iXLen(iXLen, iXLen, <vscale x 2 x i8>, iXLen, iXLen)
1593 define void @test_sf_vc_iv_se_e8mf2(<vscale x 4 x i8> %vs2, iXLen %vl) {
1594 ; CHECK-LABEL: test_sf_vc_iv_se_e8mf2:
1595 ; CHECK: # %bb.0: # %entry
1596 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
1597 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
1600 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
1604 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i8.iXLen.iXLen(iXLen, iXLen, <vscale x 4 x i8>, iXLen, iXLen)
1606 define void @test_sf_vc_iv_se_e8m1(<vscale x 8 x i8> %vs2, iXLen %vl) {
1607 ; CHECK-LABEL: test_sf_vc_iv_se_e8m1:
1608 ; CHECK: # %bb.0: # %entry
1609 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
1610 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
1613 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
1617 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i8.iXLen.iXLen(iXLen, iXLen, <vscale x 8 x i8>, iXLen, iXLen)
1619 define void @test_sf_vc_iv_se_e8m2(<vscale x 16 x i8> %vs2, iXLen %vl) {
1620 ; CHECK-LABEL: test_sf_vc_iv_se_e8m2:
1621 ; CHECK: # %bb.0: # %entry
1622 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
1623 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
1626 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
1630 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i8.iXLen.iXLen(iXLen, iXLen, <vscale x 16 x i8>, iXLen, iXLen)
1632 define void @test_sf_vc_iv_se_e8m4(<vscale x 32 x i8> %vs2, iXLen %vl) {
1633 ; CHECK-LABEL: test_sf_vc_iv_se_e8m4:
1634 ; CHECK: # %bb.0: # %entry
1635 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
1636 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
1639 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
1643 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i8.iXLen.iXLen(iXLen, iXLen, <vscale x 32 x i8>, iXLen, iXLen)
1645 define void @test_sf_vc_iv_se_e8m8(<vscale x 64 x i8> %vs2, iXLen %vl) {
1646 ; CHECK-LABEL: test_sf_vc_iv_se_e8m8:
1647 ; CHECK: # %bb.0: # %entry
1648 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
1649 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
1652 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 64 x i8> %vs2, iXLen 10, iXLen %vl)
1656 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv64i8.iXLen.iXLen(iXLen, iXLen, <vscale x 64 x i8>, iXLen, iXLen)
1658 define void @test_sf_vc_iv_se_e16mf4(<vscale x 1 x i16> %vs2, iXLen %vl) {
1659 ; CHECK-LABEL: test_sf_vc_iv_se_e16mf4:
1660 ; CHECK: # %bb.0: # %entry
1661 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
1662 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
1665 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
1669 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i16.iXLen.iXLen(iXLen, iXLen, <vscale x 1 x i16>, iXLen, iXLen)
1671 define void @test_sf_vc_iv_se_e16mf2(<vscale x 2 x i16> %vs2, iXLen %vl) {
1672 ; CHECK-LABEL: test_sf_vc_iv_se_e16mf2:
1673 ; CHECK: # %bb.0: # %entry
1674 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
1675 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
1678 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
1682 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i16.iXLen.iXLen(iXLen, iXLen, <vscale x 2 x i16>, iXLen, iXLen)
1684 define void @test_sf_vc_iv_se_e16m1(<vscale x 4 x i16> %vs2, iXLen %vl) {
1685 ; CHECK-LABEL: test_sf_vc_iv_se_e16m1:
1686 ; CHECK: # %bb.0: # %entry
1687 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
1688 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
1691 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
1695 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i16.iXLen.iXLen(iXLen, iXLen, <vscale x 4 x i16>, iXLen, iXLen)
1697 define void @test_sf_vc_iv_se_e16m2(<vscale x 8 x i16> %vs2, iXLen %vl) {
1698 ; CHECK-LABEL: test_sf_vc_iv_se_e16m2:
1699 ; CHECK: # %bb.0: # %entry
1700 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
1701 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
1704 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
1708 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i16.iXLen.iXLen(iXLen, iXLen, <vscale x 8 x i16>, iXLen, iXLen)
1710 define void @test_sf_vc_iv_se_e16m4(<vscale x 16 x i16> %vs2, iXLen %vl) {
1711 ; CHECK-LABEL: test_sf_vc_iv_se_e16m4:
1712 ; CHECK: # %bb.0: # %entry
1713 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
1714 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
1717 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
1721 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i16.iXLen.iXLen(iXLen, iXLen, <vscale x 16 x i16>, iXLen, iXLen)
1723 define void @test_sf_vc_iv_se_e16m8(<vscale x 32 x i16> %vs2, iXLen %vl) {
1724 ; CHECK-LABEL: test_sf_vc_iv_se_e16m8:
1725 ; CHECK: # %bb.0: # %entry
1726 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
1727 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
1730 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 32 x i16> %vs2, iXLen 10, iXLen %vl)
1734 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32i16.iXLen.iXLen(iXLen, iXLen, <vscale x 32 x i16>, iXLen, iXLen)
1736 define void @test_sf_vc_iv_se_e32mf2(<vscale x 1 x i32> %vs2, iXLen %vl) {
1737 ; CHECK-LABEL: test_sf_vc_iv_se_e32mf2:
1738 ; CHECK: # %bb.0: # %entry
1739 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
1740 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
1743 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
1747 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i32.iXLen.iXLen(iXLen, iXLen, <vscale x 1 x i32>, iXLen, iXLen)
1749 define void @test_sf_vc_iv_se_e32m1(<vscale x 2 x i32> %vs2, iXLen %vl) {
1750 ; CHECK-LABEL: test_sf_vc_iv_se_e32m1:
1751 ; CHECK: # %bb.0: # %entry
1752 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
1753 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
1756 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
1760 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i32.iXLen.iXLen(iXLen, iXLen, <vscale x 2 x i32>, iXLen, iXLen)
1762 define void @test_sf_vc_iv_se_e32m2(<vscale x 4 x i32> %vs2, iXLen %vl) {
1763 ; CHECK-LABEL: test_sf_vc_iv_se_e32m2:
1764 ; CHECK: # %bb.0: # %entry
1765 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
1766 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
1769 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
1773 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i32.iXLen.iXLen(iXLen, iXLen, <vscale x 4 x i32>, iXLen, iXLen)
1775 define void @test_sf_vc_iv_se_e32m4(<vscale x 8 x i32> %vs2, iXLen %vl) {
1776 ; CHECK-LABEL: test_sf_vc_iv_se_e32m4:
1777 ; CHECK: # %bb.0: # %entry
1778 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
1779 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
1782 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
1786 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i32.iXLen.iXLen(iXLen, iXLen, <vscale x 8 x i32>, iXLen, iXLen)
1788 define void @test_sf_vc_iv_se_e32m8(<vscale x 16 x i32> %vs2, iXLen %vl) {
1789 ; CHECK-LABEL: test_sf_vc_iv_se_e32m8:
1790 ; CHECK: # %bb.0: # %entry
1791 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
1792 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
1795 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 16 x i32> %vs2, iXLen 10, iXLen %vl)
1799 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16i32.iXLen.iXLen(iXLen, iXLen, <vscale x 16 x i32>, iXLen, iXLen)
1801 define void @test_sf_vc_iv_se_e64m1(<vscale x 1 x i64> %vs2, iXLen %vl) {
1802 ; CHECK-LABEL: test_sf_vc_iv_se_e64m1:
1803 ; CHECK: # %bb.0: # %entry
1804 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
1805 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
1808 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 1 x i64> %vs2, iXLen 10, iXLen %vl)
1812 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1i64.iXLen.iXLen(iXLen, iXLen, <vscale x 1 x i64>, iXLen, iXLen)
1814 define void @test_sf_vc_iv_se_e64m2(<vscale x 2 x i64> %vs2, iXLen %vl) {
1815 ; CHECK-LABEL: test_sf_vc_iv_se_e64m2:
1816 ; CHECK: # %bb.0: # %entry
1817 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
1818 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
1821 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 2 x i64> %vs2, iXLen 10, iXLen %vl)
1825 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2i64.iXLen.iXLen(iXLen, iXLen, <vscale x 2 x i64>, iXLen, iXLen)
1827 define void @test_sf_vc_iv_se_e64m4(<vscale x 4 x i64> %vs2, iXLen %vl) {
1828 ; CHECK-LABEL: test_sf_vc_iv_se_e64m4:
1829 ; CHECK: # %bb.0: # %entry
1830 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
1831 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
1834 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 4 x i64> %vs2, iXLen 10, iXLen %vl)
1838 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4i64.iXLen.iXLen(iXLen, iXLen, <vscale x 4 x i64>, iXLen, iXLen)
1840 define void @test_sf_vc_iv_se_e64m8(<vscale x 8 x i64> %vs2, iXLen %vl) {
1841 ; CHECK-LABEL: test_sf_vc_iv_se_e64m8:
1842 ; CHECK: # %bb.0: # %entry
1843 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
1844 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
1847 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 8 x i64> %vs2, iXLen 10, iXLen %vl)
1851 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8i64.iXLen.iXLen(iXLen, iXLen, <vscale x 8 x i64>, iXLen, iXLen)
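; Tests for the @llvm.riscv.sf.vc.v.iv.se intrinsics: the value-returning
; immediate-operand form, covering e8 through e64 element types.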
define <vscale x 1 x i8> @test_sf_vc_v_iv_se_e8mf8(<vscale x 1 x i8> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 1 x i8> %0
}

declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv1i8.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i8>, iXLen, iXLen)
1866 define <vscale x 2 x i8> @test_sf_vc_v_iv_se_e8mf4(<vscale x 2 x i8> %vs2, iXLen %vl) {
1867 ; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf4:
1868 ; CHECK: # %bb.0: # %entry
1869 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
1870 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
1873 %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
1874 ret <vscale x 2 x i8> %0
1877 declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv2i8.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i8>, iXLen, iXLen)
1879 define <vscale x 4 x i8> @test_sf_vc_v_iv_se_e8mf2(<vscale x 4 x i8> %vs2, iXLen %vl) {
1880 ; CHECK-LABEL: test_sf_vc_v_iv_se_e8mf2:
1881 ; CHECK: # %bb.0: # %entry
1882 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
1883 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
1886 %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
1887 ret <vscale x 4 x i8> %0
1890 declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv4i8.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i8>, iXLen, iXLen)
1892 define <vscale x 8 x i8> @test_sf_vc_v_iv_se_e8m1(<vscale x 8 x i8> %vs2, iXLen %vl) {
1893 ; CHECK-LABEL: test_sf_vc_v_iv_se_e8m1:
1894 ; CHECK: # %bb.0: # %entry
1895 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
1896 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
1899 %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
1900 ret <vscale x 8 x i8> %0
1903 declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv8i8.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i8>, iXLen, iXLen)
1905 define <vscale x 16 x i8> @test_sf_vc_v_iv_se_e8m2(<vscale x 16 x i8> %vs2, iXLen %vl) {
1906 ; CHECK-LABEL: test_sf_vc_v_iv_se_e8m2:
1907 ; CHECK: # %bb.0: # %entry
1908 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
1909 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
1912 %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
1913 ret <vscale x 16 x i8> %0
1916 declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv16i8.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i8>, iXLen, iXLen)
1918 define <vscale x 32 x i8> @test_sf_vc_v_iv_se_e8m4(<vscale x 32 x i8> %vs2, iXLen %vl) {
1919 ; CHECK-LABEL: test_sf_vc_v_iv_se_e8m4:
1920 ; CHECK: # %bb.0: # %entry
1921 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
1922 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
1925 %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
1926 ret <vscale x 32 x i8> %0
1929 declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv32i8.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i8>, iXLen, iXLen)
1931 define <vscale x 64 x i8> @test_sf_vc_v_iv_se_e8m8(<vscale x 64 x i8> %vs2, iXLen %vl) {
1932 ; CHECK-LABEL: test_sf_vc_v_iv_se_e8m8:
1933 ; CHECK: # %bb.0: # %entry
1934 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
1935 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
1938 %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 64 x i8> %vs2, iXLen 10, iXLen %vl)
1939 ret <vscale x 64 x i8> %0
1942 declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.se.nxv64i8.iXLen.iXLen.iXLen(iXLen, <vscale x 64 x i8>, iXLen, iXLen)
1944 define <vscale x 1 x i16> @test_sf_vc_v_iv_se_e16mf4(<vscale x 1 x i16> %vs2, iXLen %vl) {
1945 ; CHECK-LABEL: test_sf_vc_v_iv_se_e16mf4:
1946 ; CHECK: # %bb.0: # %entry
1947 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
1948 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
1951 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
1952 ret <vscale x 1 x i16> %0
1955 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv1i16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i16>, iXLen, iXLen)
1957 define <vscale x 2 x i16> @test_sf_vc_v_iv_se_e16mf2(<vscale x 2 x i16> %vs2, iXLen %vl) {
1958 ; CHECK-LABEL: test_sf_vc_v_iv_se_e16mf2:
1959 ; CHECK: # %bb.0: # %entry
1960 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
1961 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
1964 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
1965 ret <vscale x 2 x i16> %0
1968 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv2i16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i16>, iXLen, iXLen)
1970 define <vscale x 4 x i16> @test_sf_vc_v_iv_se_e16m1(<vscale x 4 x i16> %vs2, iXLen %vl) {
1971 ; CHECK-LABEL: test_sf_vc_v_iv_se_e16m1:
1972 ; CHECK: # %bb.0: # %entry
1973 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
1974 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
1977 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
1978 ret <vscale x 4 x i16> %0
1981 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv4i16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i16>, iXLen, iXLen)
1983 define <vscale x 8 x i16> @test_sf_vc_v_iv_se_e16m2(<vscale x 8 x i16> %vs2, iXLen %vl) {
1984 ; CHECK-LABEL: test_sf_vc_v_iv_se_e16m2:
1985 ; CHECK: # %bb.0: # %entry
1986 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
1987 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
1990 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
1991 ret <vscale x 8 x i16> %0
1994 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv8i16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i16>, iXLen, iXLen)
1996 define <vscale x 16 x i16> @test_sf_vc_v_iv_se_e16m4(<vscale x 16 x i16> %vs2, iXLen %vl) {
1997 ; CHECK-LABEL: test_sf_vc_v_iv_se_e16m4:
1998 ; CHECK: # %bb.0: # %entry
1999 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
2000 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2003 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
2004 ret <vscale x 16 x i16> %0
2007 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv16i16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i16>, iXLen, iXLen)
2009 define <vscale x 32 x i16> @test_sf_vc_v_iv_se_e16m8(<vscale x 32 x i16> %vs2, iXLen %vl) {
2010 ; CHECK-LABEL: test_sf_vc_v_iv_se_e16m8:
2011 ; CHECK: # %bb.0: # %entry
2012 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
2013 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2016 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vs2, iXLen 10, iXLen %vl)
2017 ret <vscale x 32 x i16> %0
2020 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.se.nxv32i16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i16>, iXLen, iXLen)
2022 define <vscale x 1 x i32> @test_sf_vc_v_iv_se_e32mf2(<vscale x 1 x i32> %vs2, iXLen %vl) {
2023 ; CHECK-LABEL: test_sf_vc_v_iv_se_e32mf2:
2024 ; CHECK: # %bb.0: # %entry
2025 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
2026 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2029 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
2030 ret <vscale x 1 x i32> %0
2033 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv1i32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i32>, iXLen, iXLen)
2035 define <vscale x 2 x i32> @test_sf_vc_v_iv_se_e32m1(<vscale x 2 x i32> %vs2, iXLen %vl) {
2036 ; CHECK-LABEL: test_sf_vc_v_iv_se_e32m1:
2037 ; CHECK: # %bb.0: # %entry
2038 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
2039 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2042 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
2043 ret <vscale x 2 x i32> %0
2046 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv2i32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i32>, iXLen, iXLen)
2048 define <vscale x 4 x i32> @test_sf_vc_v_iv_se_e32m2(<vscale x 4 x i32> %vs2, iXLen %vl) {
2049 ; CHECK-LABEL: test_sf_vc_v_iv_se_e32m2:
2050 ; CHECK: # %bb.0: # %entry
2051 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
2052 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2055 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
2056 ret <vscale x 4 x i32> %0
2059 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv4i32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i32>, iXLen, iXLen)
2061 define <vscale x 8 x i32> @test_sf_vc_v_iv_se_e32m4(<vscale x 8 x i32> %vs2, iXLen %vl) {
2062 ; CHECK-LABEL: test_sf_vc_v_iv_se_e32m4:
2063 ; CHECK: # %bb.0: # %entry
2064 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
2065 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2068 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
2069 ret <vscale x 8 x i32> %0
2072 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv8i32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i32>, iXLen, iXLen)
2074 define <vscale x 16 x i32> @test_sf_vc_v_iv_se_e32m8(<vscale x 16 x i32> %vs2, iXLen %vl) {
2075 ; CHECK-LABEL: test_sf_vc_v_iv_se_e32m8:
2076 ; CHECK: # %bb.0: # %entry
2077 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
2078 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2081 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vs2, iXLen 10, iXLen %vl)
2082 ret <vscale x 16 x i32> %0
2085 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.se.nxv16i32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i32>, iXLen, iXLen)
2087 define <vscale x 1 x i64> @test_sf_vc_v_iv_se_e64m1(<vscale x 1 x i64> %vs2, iXLen %vl) {
2088 ; CHECK-LABEL: test_sf_vc_v_iv_se_e64m1:
2089 ; CHECK: # %bb.0: # %entry
2090 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
2091 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2094 %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vs2, iXLen 10, iXLen %vl)
2095 ret <vscale x 1 x i64> %0
2098 declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv1i64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i64>, iXLen, iXLen)
2100 define <vscale x 2 x i64> @test_sf_vc_v_iv_se_e64m2(<vscale x 2 x i64> %vs2, iXLen %vl) {
2101 ; CHECK-LABEL: test_sf_vc_v_iv_se_e64m2:
2102 ; CHECK: # %bb.0: # %entry
2103 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
2104 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2107 %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vs2, iXLen 10, iXLen %vl)
2108 ret <vscale x 2 x i64> %0
2111 declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv2i64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i64>, iXLen, iXLen)
2113 define <vscale x 4 x i64> @test_sf_vc_v_iv_se_e64m4(<vscale x 4 x i64> %vs2, iXLen %vl) {
2114 ; CHECK-LABEL: test_sf_vc_v_iv_se_e64m4:
2115 ; CHECK: # %bb.0: # %entry
2116 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
2117 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2120 %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vs2, iXLen 10, iXLen %vl)
2121 ret <vscale x 4 x i64> %0
2124 declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv4i64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i64>, iXLen, iXLen)
2126 define <vscale x 8 x i64> @test_sf_vc_v_iv_se_e64m8(<vscale x 8 x i64> %vs2, iXLen %vl) {
2127 ; CHECK-LABEL: test_sf_vc_v_iv_se_e64m8:
2128 ; CHECK: # %bb.0: # %entry
2129 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
2130 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2133 %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vs2, iXLen 10, iXLen %vl)
2134 ret <vscale x 8 x i64> %0
2137 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.se.nxv8i64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i64>, iXLen, iXLen)
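; Tests for the @llvm.riscv.sf.vc.v.iv intrinsics without the .se suffix,
; with the same e8 through e64 coverage.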
define <vscale x 1 x i8> @test_sf_vc_v_iv_e8mf8(<vscale x 1 x i8> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_iv_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    sf.vc.v.iv 3, v8, v8, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.nxv1i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 1 x i8> %0
}

declare <vscale x 1 x i8> @llvm.riscv.sf.vc.v.iv.nxv1i8.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i8>, iXLen, iXLen)
2152 define <vscale x 2 x i8> @test_sf_vc_v_iv_e8mf4(<vscale x 2 x i8> %vs2, iXLen %vl) {
2153 ; CHECK-LABEL: test_sf_vc_v_iv_e8mf4:
2154 ; CHECK: # %bb.0: # %entry
2155 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
2156 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2159 %0 = tail call <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.nxv2i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
2160 ret <vscale x 2 x i8> %0
2163 declare <vscale x 2 x i8> @llvm.riscv.sf.vc.v.iv.nxv2i8.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i8>, iXLen, iXLen)
2165 define <vscale x 4 x i8> @test_sf_vc_v_iv_e8mf2(<vscale x 4 x i8> %vs2, iXLen %vl) {
2166 ; CHECK-LABEL: test_sf_vc_v_iv_e8mf2:
2167 ; CHECK: # %bb.0: # %entry
2168 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
2169 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2172 %0 = tail call <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.nxv4i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
2173 ret <vscale x 4 x i8> %0
2176 declare <vscale x 4 x i8> @llvm.riscv.sf.vc.v.iv.nxv4i8.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i8>, iXLen, iXLen)
2178 define <vscale x 8 x i8> @test_sf_vc_v_iv_e8m1(<vscale x 8 x i8> %vs2, iXLen %vl) {
2179 ; CHECK-LABEL: test_sf_vc_v_iv_e8m1:
2180 ; CHECK: # %bb.0: # %entry
2181 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
2182 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2185 %0 = tail call <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.nxv8i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
2186 ret <vscale x 8 x i8> %0
2189 declare <vscale x 8 x i8> @llvm.riscv.sf.vc.v.iv.nxv8i8.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i8>, iXLen, iXLen)
2191 define <vscale x 16 x i8> @test_sf_vc_v_iv_e8m2(<vscale x 16 x i8> %vs2, iXLen %vl) {
2192 ; CHECK-LABEL: test_sf_vc_v_iv_e8m2:
2193 ; CHECK: # %bb.0: # %entry
2194 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
2195 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2198 %0 = tail call <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.nxv16i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
2199 ret <vscale x 16 x i8> %0
2202 declare <vscale x 16 x i8> @llvm.riscv.sf.vc.v.iv.nxv16i8.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i8>, iXLen, iXLen)
2204 define <vscale x 32 x i8> @test_sf_vc_v_iv_e8m4(<vscale x 32 x i8> %vs2, iXLen %vl) {
2205 ; CHECK-LABEL: test_sf_vc_v_iv_e8m4:
2206 ; CHECK: # %bb.0: # %entry
2207 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
2208 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2211 %0 = tail call <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.nxv32i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
2212 ret <vscale x 32 x i8> %0
2215 declare <vscale x 32 x i8> @llvm.riscv.sf.vc.v.iv.nxv32i8.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i8>, iXLen, iXLen)
2217 define <vscale x 64 x i8> @test_sf_vc_v_iv_e8m8(<vscale x 64 x i8> %vs2, iXLen %vl) {
2218 ; CHECK-LABEL: test_sf_vc_v_iv_e8m8:
2219 ; CHECK: # %bb.0: # %entry
2220 ; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
2221 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2224 %0 = tail call <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.nxv64i8.iXLen.iXLen.iXLen(iXLen 3, <vscale x 64 x i8> %vs2, iXLen 10, iXLen %vl)
2225 ret <vscale x 64 x i8> %0
2228 declare <vscale x 64 x i8> @llvm.riscv.sf.vc.v.iv.nxv64i8.iXLen.iXLen.iXLen(iXLen, <vscale x 64 x i8>, iXLen, iXLen)
2230 define <vscale x 1 x i16> @test_sf_vc_v_iv_e16mf4(<vscale x 1 x i16> %vs2, iXLen %vl) {
2231 ; CHECK-LABEL: test_sf_vc_v_iv_e16mf4:
2232 ; CHECK: # %bb.0: # %entry
2233 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
2234 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2237 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.nxv1i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
2238 ret <vscale x 1 x i16> %0
2241 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.iv.nxv1i16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i16>, iXLen, iXLen)
2243 define <vscale x 2 x i16> @test_sf_vc_v_iv_e16mf2(<vscale x 2 x i16> %vs2, iXLen %vl) {
2244 ; CHECK-LABEL: test_sf_vc_v_iv_e16mf2:
2245 ; CHECK: # %bb.0: # %entry
2246 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
2247 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2250 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.nxv2i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
2251 ret <vscale x 2 x i16> %0
2254 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.iv.nxv2i16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i16>, iXLen, iXLen)
2256 define <vscale x 4 x i16> @test_sf_vc_v_iv_e16m1(<vscale x 4 x i16> %vs2, iXLen %vl) {
2257 ; CHECK-LABEL: test_sf_vc_v_iv_e16m1:
2258 ; CHECK: # %bb.0: # %entry
2259 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
2260 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2263 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.nxv4i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
2264 ret <vscale x 4 x i16> %0
2267 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.iv.nxv4i16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i16>, iXLen, iXLen)
2269 define <vscale x 8 x i16> @test_sf_vc_v_iv_e16m2(<vscale x 8 x i16> %vs2, iXLen %vl) {
2270 ; CHECK-LABEL: test_sf_vc_v_iv_e16m2:
2271 ; CHECK: # %bb.0: # %entry
2272 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
2273 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2276 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.nxv8i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
2277 ret <vscale x 8 x i16> %0
2280 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.iv.nxv8i16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i16>, iXLen, iXLen)
2282 define <vscale x 16 x i16> @test_sf_vc_v_iv_e16m4(<vscale x 16 x i16> %vs2, iXLen %vl) {
2283 ; CHECK-LABEL: test_sf_vc_v_iv_e16m4:
2284 ; CHECK: # %bb.0: # %entry
2285 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
2286 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2289 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.nxv16i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
2290 ret <vscale x 16 x i16> %0
2293 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.iv.nxv16i16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i16>, iXLen, iXLen)
2295 define <vscale x 32 x i16> @test_sf_vc_v_iv_e16m8(<vscale x 32 x i16> %vs2, iXLen %vl) {
2296 ; CHECK-LABEL: test_sf_vc_v_iv_e16m8:
2297 ; CHECK: # %bb.0: # %entry
2298 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
2299 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2302 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.nxv32i16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vs2, iXLen 10, iXLen %vl)
2303 ret <vscale x 32 x i16> %0
2306 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.iv.nxv32i16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x i16>, iXLen, iXLen)
2308 define <vscale x 1 x i32> @test_sf_vc_v_iv_e32mf2(<vscale x 1 x i32> %vs2, iXLen %vl) {
2309 ; CHECK-LABEL: test_sf_vc_v_iv_e32mf2:
2310 ; CHECK: # %bb.0: # %entry
2311 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
2312 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2315 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.nxv1i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
2316 ret <vscale x 1 x i32> %0
2319 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.iv.nxv1i32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i32>, iXLen, iXLen)
2321 define <vscale x 2 x i32> @test_sf_vc_v_iv_e32m1(<vscale x 2 x i32> %vs2, iXLen %vl) {
2322 ; CHECK-LABEL: test_sf_vc_v_iv_e32m1:
2323 ; CHECK: # %bb.0: # %entry
2324 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
2325 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2328 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.nxv2i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
2329 ret <vscale x 2 x i32> %0
2332 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.iv.nxv2i32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i32>, iXLen, iXLen)
2334 define <vscale x 4 x i32> @test_sf_vc_v_iv_e32m2(<vscale x 4 x i32> %vs2, iXLen %vl) {
2335 ; CHECK-LABEL: test_sf_vc_v_iv_e32m2:
2336 ; CHECK: # %bb.0: # %entry
2337 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
2338 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2341 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.nxv4i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
2342 ret <vscale x 4 x i32> %0
2345 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.iv.nxv4i32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i32>, iXLen, iXLen)
2347 define <vscale x 8 x i32> @test_sf_vc_v_iv_e32m4(<vscale x 8 x i32> %vs2, iXLen %vl) {
2348 ; CHECK-LABEL: test_sf_vc_v_iv_e32m4:
2349 ; CHECK: # %bb.0: # %entry
2350 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
2351 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2354 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.nxv8i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
2355 ret <vscale x 8 x i32> %0
2358 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.iv.nxv8i32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i32>, iXLen, iXLen)
2360 define <vscale x 16 x i32> @test_sf_vc_v_iv_e32m8(<vscale x 16 x i32> %vs2, iXLen %vl) {
2361 ; CHECK-LABEL: test_sf_vc_v_iv_e32m8:
2362 ; CHECK: # %bb.0: # %entry
2363 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
2364 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2367 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.nxv16i32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vs2, iXLen 10, iXLen %vl)
2368 ret <vscale x 16 x i32> %0
2371 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.iv.nxv16i32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x i32>, iXLen, iXLen)
2373 define <vscale x 1 x i64> @test_sf_vc_v_iv_e64m1(<vscale x 1 x i64> %vs2, iXLen %vl) {
2374 ; CHECK-LABEL: test_sf_vc_v_iv_e64m1:
2375 ; CHECK: # %bb.0: # %entry
2376 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
2377 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2380 %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.nxv1i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vs2, iXLen 10, iXLen %vl)
2381 ret <vscale x 1 x i64> %0
2384 declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.iv.nxv1i64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x i64>, iXLen, iXLen)
2386 define <vscale x 2 x i64> @test_sf_vc_v_iv_e64m2(<vscale x 2 x i64> %vs2, iXLen %vl) {
2387 ; CHECK-LABEL: test_sf_vc_v_iv_e64m2:
2388 ; CHECK: # %bb.0: # %entry
2389 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
2390 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2393 %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.nxv2i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vs2, iXLen 10, iXLen %vl)
2394 ret <vscale x 2 x i64> %0
2397 declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.iv.nxv2i64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x i64>, iXLen, iXLen)
2399 define <vscale x 4 x i64> @test_sf_vc_v_iv_e64m4(<vscale x 4 x i64> %vs2, iXLen %vl) {
2400 ; CHECK-LABEL: test_sf_vc_v_iv_e64m4:
2401 ; CHECK: # %bb.0: # %entry
2402 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
2403 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2406 %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.nxv4i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vs2, iXLen 10, iXLen %vl)
2407 ret <vscale x 4 x i64> %0
2410 declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.iv.nxv4i64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x i64>, iXLen, iXLen)
2412 define <vscale x 8 x i64> @test_sf_vc_v_iv_e64m8(<vscale x 8 x i64> %vs2, iXLen %vl) {
2413 ; CHECK-LABEL: test_sf_vc_v_iv_e64m8:
2414 ; CHECK: # %bb.0: # %entry
2415 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
2416 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
2419 %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.nxv8i64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vs2, iXLen 10, iXLen %vl)
2420 ret <vscale x 8 x i64> %0
2423 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.iv.nxv8i64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x i64>, iXLen, iXLen)
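; Tests for the @llvm.riscv.sf.vc.fv.se intrinsics: void calls passing a
; scalar floating-point operand (half, float, double) for e16 through e64.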
define void @test_sf_vc_fv_se_e16mf4(<vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fv_se_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    sf.vc.fv 1, 31, v8, fa0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i16.f16.iXLen(iXLen, iXLen, <vscale x 1 x i16>, half, iXLen)
2438 define void @test_sf_vc_fv_se_e16mf2(<vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
2439 ; CHECK-LABEL: test_sf_vc_fv_se_e16mf2:
2440 ; CHECK: # %bb.0: # %entry
2441 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
2442 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
2445 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
2449 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i16.f16.iXLen(iXLen, iXLen, <vscale x 2 x i16>, half, iXLen)
2451 define void @test_sf_vc_fv_se_e16m1(<vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
2452 ; CHECK-LABEL: test_sf_vc_fv_se_e16m1:
2453 ; CHECK: # %bb.0: # %entry
2454 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
2455 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
2458 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
2462 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i16.f16.iXLen(iXLen, iXLen, <vscale x 4 x i16>, half, iXLen)
2464 define void @test_sf_vc_fv_se_e16m2(<vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
2465 ; CHECK-LABEL: test_sf_vc_fv_se_e16m2:
2466 ; CHECK: # %bb.0: # %entry
2467 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
2468 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
2471 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
2475 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i16.f16.iXLen(iXLen, iXLen, <vscale x 8 x i16>, half, iXLen)
2477 define void @test_sf_vc_fv_se_e16m4(<vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
2478 ; CHECK-LABEL: test_sf_vc_fv_se_e16m4:
2479 ; CHECK: # %bb.0: # %entry
2480 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
2481 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
2484 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
2488 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i16.f16.iXLen(iXLen, iXLen, <vscale x 16 x i16>, half, iXLen)
2490 define void @test_sf_vc_fv_se_e16m8(<vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
2491 ; CHECK-LABEL: test_sf_vc_fv_se_e16m8:
2492 ; CHECK: # %bb.0: # %entry
2493 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
2494 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
2497 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32i16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
2501 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32i16.f16.iXLen(iXLen, iXLen, <vscale x 32 x i16>, half, iXLen)
2503 define void @test_sf_vc_fv_se_e32mf2(<vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
2504 ; CHECK-LABEL: test_sf_vc_fv_se_e32mf2:
2505 ; CHECK: # %bb.0: # %entry
2506 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
2507 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
2510 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
2514 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i32.f32.iXLen(iXLen, iXLen, <vscale x 1 x i32>, float, iXLen)
2516 define void @test_sf_vc_fv_se_e32m1(<vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
2517 ; CHECK-LABEL: test_sf_vc_fv_se_e32m1:
2518 ; CHECK: # %bb.0: # %entry
2519 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
2520 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
2523 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
2527 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i32.f32.iXLen(iXLen, iXLen, <vscale x 2 x i32>, float, iXLen)
2529 define void @test_sf_vc_fv_se_e32m2(<vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
2530 ; CHECK-LABEL: test_sf_vc_fv_se_e32m2:
2531 ; CHECK: # %bb.0: # %entry
2532 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
2533 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
2536 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
2540 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i32.f32.iXLen(iXLen, iXLen, <vscale x 4 x i32>, float, iXLen)
2542 define void @test_sf_vc_fv_se_e32m4(<vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
2543 ; CHECK-LABEL: test_sf_vc_fv_se_e32m4:
2544 ; CHECK: # %bb.0: # %entry
2545 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
2546 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
2549 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
2553 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i32.f32.iXLen(iXLen, iXLen, <vscale x 8 x i32>, float, iXLen)
2555 define void @test_sf_vc_fv_se_e32m8(<vscale x 16 x i32> %vs2, float %fs1, iXLen %vl) {
2556 ; CHECK-LABEL: test_sf_vc_fv_se_e32m8:
2557 ; CHECK: # %bb.0: # %entry
2558 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
2559 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
2562 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl)
2566 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16i32.f32.iXLen(iXLen, iXLen, <vscale x 16 x i32>, float, iXLen)
2568 define void @test_sf_vc_fv_se_e64m1(<vscale x 1 x i64> %vs2, double %fs1, iXLen %vl) {
2569 ; CHECK-LABEL: test_sf_vc_fv_se_e64m1:
2570 ; CHECK: # %bb.0: # %entry
2571 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
2572 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
2575 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i64.f64.iXLen(iXLen 1, iXLen 31, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl)
2579 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1i64.f64.iXLen(iXLen, iXLen, <vscale x 1 x i64>, double, iXLen)
2581 define void @test_sf_vc_fv_se_e64m2(<vscale x 2 x i64> %vs2, double %fs1, iXLen %vl) {
2582 ; CHECK-LABEL: test_sf_vc_fv_se_e64m2:
2583 ; CHECK: # %bb.0: # %entry
2584 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
2585 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
2588 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i64.f64.iXLen(iXLen 1, iXLen 31, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl)
2592 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2i64.f64.iXLen(iXLen, iXLen, <vscale x 2 x i64>, double, iXLen)
2594 define void @test_sf_vc_fv_se_e64m4(<vscale x 4 x i64> %vs2, double %fs1, iXLen %vl) {
2595 ; CHECK-LABEL: test_sf_vc_fv_se_e64m4:
2596 ; CHECK: # %bb.0: # %entry
2597 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
2598 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
2601 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i64.f64.iXLen(iXLen 1, iXLen 31, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl)
2605 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4i64.f64.iXLen(iXLen, iXLen, <vscale x 4 x i64>, double, iXLen)
2607 define void @test_sf_vc_fv_se_e64m8(<vscale x 8 x i64> %vs2, double %fs1, iXLen %vl) {
2608 ; CHECK-LABEL: test_sf_vc_fv_se_e64m8:
2609 ; CHECK: # %bb.0: # %entry
2610 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
2611 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
2614 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i64.f64.iXLen(iXLen 1, iXLen 31, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl)
2618 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8i64.f64.iXLen(iXLen, iXLen, <vscale x 8 x i64>, double, iXLen)
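; Tests for the @llvm.riscv.sf.vc.v.fv.se intrinsics: the value-returning
; floating-point-scalar form, beginning with the f16 (e16) variants.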
define <vscale x 1 x i16> @test_sf_vc_v_fv_se_e16mf4(<vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_fv_se_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    sf.vc.v.fv 1, v8, v8, fa0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv1i16.iXLen.f16.iXLen(iXLen 1, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
  ret <vscale x 1 x i16> %0
}

declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv1i16.iXLen.f16.iXLen(iXLen, <vscale x 1 x i16>, half, iXLen)
2633 define <vscale x 2 x i16> @test_sf_vc_v_fv_se_e16mf2(<vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
2634 ; CHECK-LABEL: test_sf_vc_v_fv_se_e16mf2:
2635 ; CHECK: # %bb.0: # %entry
2636 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
2637 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2640 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv2i16.iXLen.f16.iXLen(iXLen 1, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
2641 ret <vscale x 2 x i16> %0
2644 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv2i16.iXLen.f16.iXLen(iXLen, <vscale x 2 x i16>, half, iXLen)
2646 define <vscale x 4 x i16> @test_sf_vc_v_fv_se_e16m1(<vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
2647 ; CHECK-LABEL: test_sf_vc_v_fv_se_e16m1:
2648 ; CHECK: # %bb.0: # %entry
2649 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
2650 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2653 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv4i16.iXLen.f16.iXLen(iXLen 1, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
2654 ret <vscale x 4 x i16> %0
2657 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv4i16.iXLen.f16.iXLen(iXLen, <vscale x 4 x i16>, half, iXLen)
2659 define <vscale x 8 x i16> @test_sf_vc_v_fv_se_e16m2(<vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
2660 ; CHECK-LABEL: test_sf_vc_v_fv_se_e16m2:
2661 ; CHECK: # %bb.0: # %entry
2662 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
2663 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2666 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv8i16.iXLen.f16.iXLen(iXLen 1, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
2667 ret <vscale x 8 x i16> %0
2670 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv8i16.iXLen.f16.iXLen(iXLen, <vscale x 8 x i16>, half, iXLen)
2672 define <vscale x 16 x i16> @test_sf_vc_v_fv_se_e16m4(<vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
2673 ; CHECK-LABEL: test_sf_vc_v_fv_se_e16m4:
2674 ; CHECK: # %bb.0: # %entry
2675 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
2676 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2679 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv16i16.iXLen.f16.iXLen(iXLen 1, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
2680 ret <vscale x 16 x i16> %0
2683 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv16i16.iXLen.f16.iXLen(iXLen, <vscale x 16 x i16>, half, iXLen)
2685 define <vscale x 32 x i16> @test_sf_vc_v_fv_se_e16m8(<vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
2686 ; CHECK-LABEL: test_sf_vc_v_fv_se_e16m8:
2687 ; CHECK: # %bb.0: # %entry
2688 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
2689 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2692 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv32i16.iXLen.f16.iXLen(iXLen 1, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
2693 ret <vscale x 32 x i16> %0
2696 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.se.nxv32i16.iXLen.f16.iXLen(iXLen, <vscale x 32 x i16>, half, iXLen)
2698 define <vscale x 1 x i32> @test_sf_vc_v_fv_se_e32mf2(<vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
2699 ; CHECK-LABEL: test_sf_vc_v_fv_se_e32mf2:
2700 ; CHECK: # %bb.0: # %entry
2701 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
2702 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2705 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv1i32.iXLen.f32.iXLen(iXLen 1, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
2706 ret <vscale x 1 x i32> %0
2709 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv1i32.iXLen.f32.iXLen(iXLen, <vscale x 1 x i32>, float, iXLen)
2711 define <vscale x 2 x i32> @test_sf_vc_v_fv_se_e32m1(<vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
2712 ; CHECK-LABEL: test_sf_vc_v_fv_se_e32m1:
2713 ; CHECK: # %bb.0: # %entry
2714 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
2715 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2718 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv2i32.iXLen.f32.iXLen(iXLen 1, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
2719 ret <vscale x 2 x i32> %0
2722 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv2i32.iXLen.f32.iXLen(iXLen, <vscale x 2 x i32>, float, iXLen)
2724 define <vscale x 4 x i32> @test_sf_vc_v_fv_se_e32m2(<vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
2725 ; CHECK-LABEL: test_sf_vc_v_fv_se_e32m2:
2726 ; CHECK: # %bb.0: # %entry
2727 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
2728 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2731 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv4i32.iXLen.f32.iXLen(iXLen 1, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
2732 ret <vscale x 4 x i32> %0
2735 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv4i32.iXLen.f32.iXLen(iXLen, <vscale x 4 x i32>, float, iXLen)
2737 define <vscale x 8 x i32> @test_sf_vc_v_fv_se_e32m4(<vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
2738 ; CHECK-LABEL: test_sf_vc_v_fv_se_e32m4:
2739 ; CHECK: # %bb.0: # %entry
2740 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
2741 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2744 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv8i32.iXLen.f32.iXLen(iXLen 1, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
2745 ret <vscale x 8 x i32> %0
2748 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv8i32.iXLen.f32.iXLen(iXLen, <vscale x 8 x i32>, float, iXLen)
2750 define <vscale x 16 x i32> @test_sf_vc_v_fv_se_e32m8(<vscale x 16 x i32> %vs2, float %fs1, iXLen %vl) {
2751 ; CHECK-LABEL: test_sf_vc_v_fv_se_e32m8:
2752 ; CHECK: # %bb.0: # %entry
2753 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
2754 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2757 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv16i32.iXLen.f32.iXLen(iXLen 1, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl)
2758 ret <vscale x 16 x i32> %0
2761 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.se.nxv16i32.iXLen.f32.iXLen(iXLen, <vscale x 16 x i32>, float, iXLen)
2763 define <vscale x 1 x i64> @test_sf_vc_v_fv_se_e64m1(<vscale x 1 x i64> %vs2, double %fs1, iXLen %vl) {
2764 ; CHECK-LABEL: test_sf_vc_v_fv_se_e64m1:
2765 ; CHECK: # %bb.0: # %entry
2766 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
2767 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2770 %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv1i64.iXLen.f64.iXLen(iXLen 1, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl)
2771 ret <vscale x 1 x i64> %0
2774 declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv1i64.iXLen.f64.iXLen(iXLen, <vscale x 1 x i64>, double, iXLen)
2776 define <vscale x 2 x i64> @test_sf_vc_v_fv_se_e64m2(<vscale x 2 x i64> %vs2, double %fs1, iXLen %vl) {
2777 ; CHECK-LABEL: test_sf_vc_v_fv_se_e64m2:
2778 ; CHECK: # %bb.0: # %entry
2779 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
2780 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2783 %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv2i64.iXLen.f64.iXLen(iXLen 1, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl)
2784 ret <vscale x 2 x i64> %0
2787 declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv2i64.iXLen.f64.iXLen(iXLen, <vscale x 2 x i64>, double, iXLen)
2789 define <vscale x 4 x i64> @test_sf_vc_v_fv_se_e64m4(<vscale x 4 x i64> %vs2, double %fs1, iXLen %vl) {
2790 ; CHECK-LABEL: test_sf_vc_v_fv_se_e64m4:
2791 ; CHECK: # %bb.0: # %entry
2792 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
2793 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2796 %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv4i64.iXLen.f64.iXLen(iXLen 1, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl)
2797 ret <vscale x 4 x i64> %0
2800 declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv4i64.iXLen.f64.iXLen(iXLen, <vscale x 4 x i64>, double, iXLen)
2802 define <vscale x 8 x i64> @test_sf_vc_v_fv_se_e64m8(<vscale x 8 x i64> %vs2, double %fs1, iXLen %vl) {
2803 ; CHECK-LABEL: test_sf_vc_v_fv_se_e64m8:
2804 ; CHECK: # %bb.0: # %entry
2805 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
2806 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2809 %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv8i64.iXLen.f64.iXLen(iXLen 1, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl)
2810 ret <vscale x 8 x i64> %0
2813 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.se.nxv8i64.iXLen.f64.iXLen(iXLen, <vscale x 8 x i64>, double, iXLen)
2815 define <vscale x 1 x i16> @test_sf_vc_v_fv_e16mf4(<vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
2816 ; CHECK-LABEL: test_sf_vc_v_fv_e16mf4:
2817 ; CHECK: # %bb.0: # %entry
2818 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
2819 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2822 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.nxv1i16.iXLen.f16.iXLen(iXLen 1, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
2823 ret <vscale x 1 x i16> %0
2826 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.fv.nxv1i16.iXLen.f16.iXLen(iXLen, <vscale x 1 x i16>, half, iXLen)
2828 define <vscale x 2 x i16> @test_sf_vc_v_fv_e16mf2(<vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
2829 ; CHECK-LABEL: test_sf_vc_v_fv_e16mf2:
2830 ; CHECK: # %bb.0: # %entry
2831 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
2832 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2835 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.nxv2i16.iXLen.f16.iXLen(iXLen 1, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
2836 ret <vscale x 2 x i16> %0
2839 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.fv.nxv2i16.iXLen.f16.iXLen(iXLen, <vscale x 2 x i16>, half, iXLen)
2841 define <vscale x 4 x i16> @test_sf_vc_v_fv_e16m1(<vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
2842 ; CHECK-LABEL: test_sf_vc_v_fv_e16m1:
2843 ; CHECK: # %bb.0: # %entry
2844 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
2845 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2848 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.nxv4i16.iXLen.f16.iXLen(iXLen 1, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
2849 ret <vscale x 4 x i16> %0
2852 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.fv.nxv4i16.iXLen.f16.iXLen(iXLen, <vscale x 4 x i16>, half, iXLen)
2854 define <vscale x 8 x i16> @test_sf_vc_v_fv_e16m2(<vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
2855 ; CHECK-LABEL: test_sf_vc_v_fv_e16m2:
2856 ; CHECK: # %bb.0: # %entry
2857 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
2858 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2861 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.nxv8i16.iXLen.f16.iXLen(iXLen 1, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
2862 ret <vscale x 8 x i16> %0
2865 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.fv.nxv8i16.iXLen.f16.iXLen(iXLen, <vscale x 8 x i16>, half, iXLen)
2867 define <vscale x 16 x i16> @test_sf_vc_v_fv_e16m4(<vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
2868 ; CHECK-LABEL: test_sf_vc_v_fv_e16m4:
2869 ; CHECK: # %bb.0: # %entry
2870 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
2871 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2874 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.nxv16i16.iXLen.f16.iXLen(iXLen 1, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
2875 ret <vscale x 16 x i16> %0
2878 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.fv.nxv16i16.iXLen.f16.iXLen(iXLen, <vscale x 16 x i16>, half, iXLen)
2880 define <vscale x 32 x i16> @test_sf_vc_v_fv_e16m8(<vscale x 32 x i16> %vs2, half %fs1, iXLen %vl) {
2881 ; CHECK-LABEL: test_sf_vc_v_fv_e16m8:
2882 ; CHECK: # %bb.0: # %entry
2883 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
2884 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2887 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.nxv32i16.iXLen.f16.iXLen(iXLen 1, <vscale x 32 x i16> %vs2, half %fs1, iXLen %vl)
2888 ret <vscale x 32 x i16> %0
2891 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.fv.nxv32i16.iXLen.f16.iXLen(iXLen, <vscale x 32 x i16>, half, iXLen)
2893 define <vscale x 1 x i32> @test_sf_vc_v_fv_e32mf2(<vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
2894 ; CHECK-LABEL: test_sf_vc_v_fv_e32mf2:
2895 ; CHECK: # %bb.0: # %entry
2896 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
2897 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2900 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.nxv1i32.iXLen.f32.iXLen(iXLen 1, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
2901 ret <vscale x 1 x i32> %0
2904 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fv.nxv1i32.iXLen.f32.iXLen(iXLen, <vscale x 1 x i32>, float, iXLen)
2906 define <vscale x 2 x i32> @test_sf_vc_v_fv_e32m1(<vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
2907 ; CHECK-LABEL: test_sf_vc_v_fv_e32m1:
2908 ; CHECK: # %bb.0: # %entry
2909 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
2910 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2913 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.nxv2i32.iXLen.f32.iXLen(iXLen 1, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
2914 ret <vscale x 2 x i32> %0
2917 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fv.nxv2i32.iXLen.f32.iXLen(iXLen, <vscale x 2 x i32>, float, iXLen)
2919 define <vscale x 4 x i32> @test_sf_vc_v_fv_e32m2(<vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
2920 ; CHECK-LABEL: test_sf_vc_v_fv_e32m2:
2921 ; CHECK: # %bb.0: # %entry
2922 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
2923 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2926 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.nxv4i32.iXLen.f32.iXLen(iXLen 1, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
2927 ret <vscale x 4 x i32> %0
2930 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fv.nxv4i32.iXLen.f32.iXLen(iXLen, <vscale x 4 x i32>, float, iXLen)
2932 define <vscale x 8 x i32> @test_sf_vc_v_fv_e32m4(<vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
2933 ; CHECK-LABEL: test_sf_vc_v_fv_e32m4:
2934 ; CHECK: # %bb.0: # %entry
2935 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
2936 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2939 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.nxv8i32.iXLen.f32.iXLen(iXLen 1, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
2940 ret <vscale x 8 x i32> %0
2943 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fv.nxv8i32.iXLen.f32.iXLen(iXLen, <vscale x 8 x i32>, float, iXLen)
2945 define <vscale x 16 x i32> @test_sf_vc_v_fv_e32m8(<vscale x 16 x i32> %vs2, float %fs1, iXLen %vl) {
2946 ; CHECK-LABEL: test_sf_vc_v_fv_e32m8:
2947 ; CHECK: # %bb.0: # %entry
2948 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
2949 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2952 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.nxv16i32.iXLen.f32.iXLen(iXLen 1, <vscale x 16 x i32> %vs2, float %fs1, iXLen %vl)
2953 ret <vscale x 16 x i32> %0
2956 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fv.nxv16i32.iXLen.f32.iXLen(iXLen, <vscale x 16 x i32>, float, iXLen)
2958 define <vscale x 1 x i64> @test_sf_vc_v_fv_e64m1(<vscale x 1 x i64> %vs2, double %fs1, iXLen %vl) {
2959 ; CHECK-LABEL: test_sf_vc_v_fv_e64m1:
2960 ; CHECK: # %bb.0: # %entry
2961 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
2962 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2965 %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.nxv1i64.iXLen.f64.iXLen(iXLen 1, <vscale x 1 x i64> %vs2, double %fs1, iXLen %vl)
2966 ret <vscale x 1 x i64> %0
2969 declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fv.nxv1i64.iXLen.f64.iXLen(iXLen, <vscale x 1 x i64>, double, iXLen)
2971 define <vscale x 2 x i64> @test_sf_vc_v_fv_e64m2(<vscale x 2 x i64> %vs2, double %fs1, iXLen %vl) {
2972 ; CHECK-LABEL: test_sf_vc_v_fv_e64m2:
2973 ; CHECK: # %bb.0: # %entry
2974 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
2975 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2978 %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.nxv2i64.iXLen.f64.iXLen(iXLen 1, <vscale x 2 x i64> %vs2, double %fs1, iXLen %vl)
2979 ret <vscale x 2 x i64> %0
2982 declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fv.nxv2i64.iXLen.f64.iXLen(iXLen, <vscale x 2 x i64>, double, iXLen)
2984 define <vscale x 4 x i64> @test_sf_vc_v_fv_e64m4(<vscale x 4 x i64> %vs2, double %fs1, iXLen %vl) {
2985 ; CHECK-LABEL: test_sf_vc_v_fv_e64m4:
2986 ; CHECK: # %bb.0: # %entry
2987 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
2988 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
2991 %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.nxv4i64.iXLen.f64.iXLen(iXLen 1, <vscale x 4 x i64> %vs2, double %fs1, iXLen %vl)
2992 ret <vscale x 4 x i64> %0
2995 declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fv.nxv4i64.iXLen.f64.iXLen(iXLen, <vscale x 4 x i64>, double, iXLen)
2997 define <vscale x 8 x i64> @test_sf_vc_v_fv_e64m8(<vscale x 8 x i64> %vs2, double %fs1, iXLen %vl) {
2998 ; CHECK-LABEL: test_sf_vc_v_fv_e64m8:
2999 ; CHECK: # %bb.0: # %entry
3000 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
3001 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
3004 %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.nxv8i64.iXLen.f64.iXLen(iXLen 1, <vscale x 8 x i64> %vs2, double %fs1, iXLen %vl)
3005 ret <vscale x 8 x i64> %0
3008 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fv.nxv8i64.iXLen.f64.iXLen(iXLen, <vscale x 8 x i64>, double, iXLen)
3010 define void @test_f_sf_vc_vv_se_e16mf4(<vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
3011 ; CHECK-LABEL: test_f_sf_vc_vv_se_e16mf4:
3012 ; CHECK: # %bb.0: # %entry
3013 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
3014 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
3017 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f16.nxv1f16.iXLen(iXLen 3, iXLen 31, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
3021 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f16.nxv1f16.iXLen(iXLen, iXLen, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
3023 define void @test_f_sf_vc_vv_se_e16mf2(<vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
3024 ; CHECK-LABEL: test_f_sf_vc_vv_se_e16mf2:
3025 ; CHECK: # %bb.0: # %entry
3026 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
3027 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
3030 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f16.nxv2f16.iXLen(iXLen 3, iXLen 31, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
3034 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f16.nxv2f16.iXLen(iXLen, iXLen, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
3036 define void @test_f_sf_vc_vv_se_e16m1(<vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
3037 ; CHECK-LABEL: test_f_sf_vc_vv_se_e16m1:
3038 ; CHECK: # %bb.0: # %entry
3039 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
3040 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
3043 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f16.nxv4f16.iXLen(iXLen 3, iXLen 31, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
3047 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f16.nxv4f16.iXLen(iXLen, iXLen, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
3049 define void @test_f_sf_vc_vv_se_e16m2(<vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
3050 ; CHECK-LABEL: test_f_sf_vc_vv_se_e16m2:
3051 ; CHECK: # %bb.0: # %entry
3052 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
3053 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10
3056 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f16.nxv8f16.iXLen(iXLen 3, iXLen 31, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
3060 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f16.nxv8f16.iXLen(iXLen, iXLen, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
3062 define void @test_f_sf_vc_vv_se_e16m4(<vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
3063 ; CHECK-LABEL: test_f_sf_vc_vv_se_e16m4:
3064 ; CHECK: # %bb.0: # %entry
3065 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
3066 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12
3069 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f16.nxv16f16.iXLen(iXLen 3, iXLen 31, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
3073 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f16.nxv16f16.iXLen(iXLen, iXLen, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
3075 define void @test_f_sf_vc_vv_se_e16m8(<vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl) {
3076 ; CHECK-LABEL: test_f_sf_vc_vv_se_e16m8:
3077 ; CHECK: # %bb.0: # %entry
3078 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
3079 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v16
3082 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32f16.nxv32f16.iXLen(iXLen 3, iXLen 31, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl)
3086 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv32f16.nxv32f16.iXLen(iXLen, iXLen, <vscale x 32 x half>, <vscale x 32 x half>, iXLen)
3088 define void @test_f_sf_vc_vv_se_e32mf2(<vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
3089 ; CHECK-LABEL: test_f_sf_vc_vv_se_e32mf2:
3090 ; CHECK: # %bb.0: # %entry
3091 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
3092 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
3095 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f32.nxv1f32.iXLen(iXLen 3, iXLen 31, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
3099 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f32.nxv1f32.iXLen(iXLen, iXLen, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
3101 define void @test_f_sf_vc_vv_se_e32m1(<vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
3102 ; CHECK-LABEL: test_f_sf_vc_vv_se_e32m1:
3103 ; CHECK: # %bb.0: # %entry
3104 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
3105 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
3108 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f32.nxv2f32.iXLen(iXLen 3, iXLen 31, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
3112 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f32.nxv2f32.iXLen(iXLen, iXLen, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
3114 define void @test_f_sf_vc_vv_se_e32m2(<vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
3115 ; CHECK-LABEL: test_f_sf_vc_vv_se_e32m2:
3116 ; CHECK: # %bb.0: # %entry
3117 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
3118 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10
3121 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f32.nxv4f32.iXLen(iXLen 3, iXLen 31, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
3125 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f32.nxv4f32.iXLen(iXLen, iXLen, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
3127 define void @test_f_sf_vc_vv_se_e32m4(<vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
3128 ; CHECK-LABEL: test_f_sf_vc_vv_se_e32m4:
3129 ; CHECK: # %bb.0: # %entry
3130 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
3131 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12
3134 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f32.nxv8f32.iXLen(iXLen 3, iXLen 31, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
3138 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f32.nxv8f32.iXLen(iXLen, iXLen, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
3140 define void @test_f_sf_vc_vv_se_e32m8(<vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl) {
3141 ; CHECK-LABEL: test_f_sf_vc_vv_se_e32m8:
3142 ; CHECK: # %bb.0: # %entry
3143 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
3144 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v16
3147 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f32.nxv16f32.iXLen(iXLen 3, iXLen 31, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl)
3151 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv16f32.nxv16f32.iXLen(iXLen, iXLen, <vscale x 16 x float>, <vscale x 16 x float>, iXLen)
3153 define void @test_f_sf_vc_vv_se_e64m1(<vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl) {
3154 ; CHECK-LABEL: test_f_sf_vc_vv_se_e64m1:
3155 ; CHECK: # %bb.0: # %entry
3156 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
3157 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v9
3160 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f64.nxv1f64.iXLen(iXLen 3, iXLen 31, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl)
3164 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv1f64.nxv1f64.iXLen(iXLen, iXLen, <vscale x 1 x double>, <vscale x 1 x double>, iXLen)
3166 define void @test_f_sf_vc_vv_se_e64m2(<vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl) {
3167 ; CHECK-LABEL: test_f_sf_vc_vv_se_e64m2:
3168 ; CHECK: # %bb.0: # %entry
3169 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
3170 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v10
3173 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f64.nxv2f64.iXLen(iXLen 3, iXLen 31, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl)
3177 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv2f64.nxv2f64.iXLen(iXLen, iXLen, <vscale x 2 x double>, <vscale x 2 x double>, iXLen)
3179 define void @test_f_sf_vc_vv_se_e64m4(<vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl) {
3180 ; CHECK-LABEL: test_f_sf_vc_vv_se_e64m4:
3181 ; CHECK: # %bb.0: # %entry
3182 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
3183 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v12
3186 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f64.nxv4f64.iXLen(iXLen 3, iXLen 31, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl)
3190 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv4f64.nxv4f64.iXLen(iXLen, iXLen, <vscale x 4 x double>, <vscale x 4 x double>, iXLen)
3192 define void @test_f_sf_vc_vv_se_e64m8(<vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl) {
3193 ; CHECK-LABEL: test_f_sf_vc_vv_se_e64m8:
3194 ; CHECK: # %bb.0: # %entry
3195 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
3196 ; CHECK-NEXT: sf.vc.vv 3, 31, v8, v16
3199 tail call void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f64.nxv8f64.iXLen(iXLen 3, iXLen 31, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl)
3203 declare void @llvm.riscv.sf.vc.vv.se.iXLen.nxv8f64.nxv8f64.iXLen(iXLen, iXLen, <vscale x 8 x double>, <vscale x 8 x double>, iXLen)
3205 define <vscale x 1 x half> @test_f_sf_vc_v_vv_se_e16mf4(<vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
3206 ; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16mf4:
3207 ; CHECK: # %bb.0: # %entry
3208 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
3209 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
3212 %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen 3, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
3213 ret <vscale x 1 x half> %0
3216 declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.vv.se.nxv1f16.iXLen.nxv1f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
3218 define <vscale x 2 x half> @test_f_sf_vc_v_vv_se_e16mf2(<vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
3219 ; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16mf2:
3220 ; CHECK: # %bb.0: # %entry
3221 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
3222 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
3225 %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen 3, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
3226 ret <vscale x 2 x half> %0
3229 declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.vv.se.nxv2f16.iXLen.nxv2f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
3231 define <vscale x 4 x half> @test_f_sf_vc_v_vv_se_e16m1(<vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
3232 ; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16m1:
3233 ; CHECK: # %bb.0: # %entry
3234 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
3235 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
3238 %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen 3, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
3239 ret <vscale x 4 x half> %0
3242 declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.vv.se.nxv4f16.iXLen.nxv4f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
3244 define <vscale x 8 x half> @test_f_sf_vc_v_vv_se_e16m2(<vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
3245 ; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16m2:
3246 ; CHECK: # %bb.0: # %entry
3247 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
3248 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
3251 %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen 3, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
3252 ret <vscale x 8 x half> %0
3255 declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.vv.se.nxv8f16.iXLen.nxv8f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
3257 define <vscale x 16 x half> @test_f_sf_vc_v_vv_se_e16m4(<vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
3258 ; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16m4:
3259 ; CHECK: # %bb.0: # %entry
3260 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
3261 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
3264 %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen 3, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
3265 ret <vscale x 16 x half> %0
3268 declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.vv.se.nxv16f16.iXLen.nxv16f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
3270 define <vscale x 32 x half> @test_f_sf_vc_v_vv_se_e16m8(<vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl) {
3271 ; CHECK-LABEL: test_f_sf_vc_v_vv_se_e16m8:
3272 ; CHECK: # %bb.0: # %entry
3273 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
3274 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16
3277 %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen 3, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl)
3278 ret <vscale x 32 x half> %0
3281 declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.vv.se.nxv32f16.iXLen.nxv32f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, iXLen)
3283 define <vscale x 1 x float> @test_f_sf_vc_v_vv_se_e32mf2(<vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
3284 ; CHECK-LABEL: test_f_sf_vc_v_vv_se_e32mf2:
3285 ; CHECK: # %bb.0: # %entry
3286 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
3287 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
3290 %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen 3, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
3291 ret <vscale x 1 x float> %0
3294 declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vv.se.nxv1f32.iXLen.nxv1f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
3296 define <vscale x 2 x float> @test_f_sf_vc_v_vv_se_e32m1(<vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
3297 ; CHECK-LABEL: test_f_sf_vc_v_vv_se_e32m1:
3298 ; CHECK: # %bb.0: # %entry
3299 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
3300 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
3303 %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen 3, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
3304 ret <vscale x 2 x float> %0
3307 declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vv.se.nxv2f32.iXLen.nxv2f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
3309 define <vscale x 4 x float> @test_f_sf_vc_v_vv_se_e32m2(<vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
3310 ; CHECK-LABEL: test_f_sf_vc_v_vv_se_e32m2:
3311 ; CHECK: # %bb.0: # %entry
3312 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
3313 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
3316 %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen 3, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
3317 ret <vscale x 4 x float> %0
3320 declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vv.se.nxv4f32.iXLen.nxv4f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
3322 define <vscale x 8 x float> @test_f_sf_vc_v_vv_se_e32m4(<vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
3323 ; CHECK-LABEL: test_f_sf_vc_v_vv_se_e32m4:
3324 ; CHECK: # %bb.0: # %entry
3325 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
3326 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
3329 %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen 3, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
3330 ret <vscale x 8 x float> %0
3333 declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vv.se.nxv8f32.iXLen.nxv8f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
3335 define <vscale x 16 x float> @test_f_sf_vc_v_vv_se_e32m8(<vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl) {
3336 ; CHECK-LABEL: test_f_sf_vc_v_vv_se_e32m8:
3337 ; CHECK: # %bb.0: # %entry
3338 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
3339 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16
3342 %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen 3, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl)
3343 ret <vscale x 16 x float> %0
3346 declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vv.se.nxv16f32.iXLen.nxv16f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, iXLen)
3348 define <vscale x 1 x double> @test_f_sf_vc_v_vv_se_e64m1(<vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl) {
3349 ; CHECK-LABEL: test_f_sf_vc_v_vv_se_e64m1:
3350 ; CHECK: # %bb.0: # %entry
3351 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
3352 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
3355 %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen 3, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl)
3356 ret <vscale x 1 x double> %0
3359 declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vv.se.nxv1f64.iXLen.nxv1f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, iXLen)
3361 define <vscale x 2 x double> @test_f_sf_vc_v_vv_se_e64m2(<vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl) {
3362 ; CHECK-LABEL: test_f_sf_vc_v_vv_se_e64m2:
3363 ; CHECK: # %bb.0: # %entry
3364 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
3365 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
3368 %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen 3, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl)
3369 ret <vscale x 2 x double> %0
3372 declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vv.se.nxv2f64.iXLen.nxv2f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, iXLen)
3374 define <vscale x 4 x double> @test_f_sf_vc_v_vv_se_e64m4(<vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl) {
3375 ; CHECK-LABEL: test_f_sf_vc_v_vv_se_e64m4:
3376 ; CHECK: # %bb.0: # %entry
3377 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
3378 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
3381 %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen 3, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl)
3382 ret <vscale x 4 x double> %0
3385 declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vv.se.nxv4f64.iXLen.nxv4f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, iXLen)
3387 define <vscale x 8 x double> @test_f_sf_vc_v_vv_se_e64m8(<vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl) {
3388 ; CHECK-LABEL: test_f_sf_vc_v_vv_se_e64m8:
3389 ; CHECK: # %bb.0: # %entry
3390 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
3391 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16
3394 %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen 3, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl)
3395 ret <vscale x 8 x double> %0
3398 declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vv.se.nxv8f64.iXLen.nxv8f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, iXLen)
3400 define <vscale x 1 x half> @test_f_sf_vc_v_vv_e16mf4(<vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
3401 ; CHECK-LABEL: test_f_sf_vc_v_vv_e16mf4:
3402 ; CHECK: # %bb.0: # %entry
3403 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
3404 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
3407 %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.vv.nxv1f16.iXLen.nxv1f16.iXLen(iXLen 3, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
3408 ret <vscale x 1 x half> %0
3411 declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.vv.nxv1f16.iXLen.nxv1f16.iXLen(iXLen, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
3413 define <vscale x 2 x half> @test_f_sf_vc_v_vv_e16mf2(<vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
3414 ; CHECK-LABEL: test_f_sf_vc_v_vv_e16mf2:
3415 ; CHECK: # %bb.0: # %entry
3416 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
3417 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
3420 %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.vv.nxv2f16.iXLen.nxv2f16.iXLen(iXLen 3, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
3421 ret <vscale x 2 x half> %0
3424 declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.vv.nxv2f16.iXLen.nxv2f16.iXLen(iXLen, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
3426 define <vscale x 4 x half> @test_f_sf_vc_v_vv_e16m1(<vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
3427 ; CHECK-LABEL: test_f_sf_vc_v_vv_e16m1:
3428 ; CHECK: # %bb.0: # %entry
3429 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
3430 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
3433 %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.vv.nxv4f16.iXLen.nxv4f16.iXLen(iXLen 3, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
3434 ret <vscale x 4 x half> %0
3437 declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.vv.nxv4f16.iXLen.nxv4f16.iXLen(iXLen, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
3439 define <vscale x 8 x half> @test_f_sf_vc_v_vv_e16m2(<vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
3440 ; CHECK-LABEL: test_f_sf_vc_v_vv_e16m2:
3441 ; CHECK: # %bb.0: # %entry
3442 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
3443 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
3446 %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.vv.nxv8f16.iXLen.nxv8f16.iXLen(iXLen 3, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
3447 ret <vscale x 8 x half> %0
3450 declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.vv.nxv8f16.iXLen.nxv8f16.iXLen(iXLen, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
3452 define <vscale x 16 x half> @test_f_sf_vc_v_vv_e16m4(<vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
3453 ; CHECK-LABEL: test_f_sf_vc_v_vv_e16m4:
3454 ; CHECK: # %bb.0: # %entry
3455 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
3456 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
3459 %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.vv.nxv16f16.iXLen.nxv16f16.iXLen(iXLen 3, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
3460 ret <vscale x 16 x half> %0
3463 declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.vv.nxv16f16.iXLen.nxv16f16.iXLen(iXLen, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
3465 define <vscale x 32 x half> @test_f_sf_vc_v_vv_e16m8(<vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl) {
3466 ; CHECK-LABEL: test_f_sf_vc_v_vv_e16m8:
3467 ; CHECK: # %bb.0: # %entry
3468 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
3469 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16
3472 %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.vv.nxv32f16.iXLen.nxv32f16.iXLen(iXLen 3, <vscale x 32 x half> %vs2, <vscale x 32 x half> %vs1, iXLen %vl)
3473 ret <vscale x 32 x half> %0
3476 declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.vv.nxv32f16.iXLen.nxv32f16.iXLen(iXLen, <vscale x 32 x half>, <vscale x 32 x half>, iXLen)
3478 define <vscale x 1 x float> @test_f_sf_vc_v_vv_e32mf2(<vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
3479 ; CHECK-LABEL: test_f_sf_vc_v_vv_e32mf2:
3480 ; CHECK: # %bb.0: # %entry
3481 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
3482 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
3485 %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vv.nxv1f32.iXLen.nxv1f32.iXLen(iXLen 3, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
3486 ret <vscale x 1 x float> %0
3489 declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vv.nxv1f32.iXLen.nxv1f32.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
3491 define <vscale x 2 x float> @test_f_sf_vc_v_vv_e32m1(<vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
3492 ; CHECK-LABEL: test_f_sf_vc_v_vv_e32m1:
3493 ; CHECK: # %bb.0: # %entry
3494 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
3495 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
3498 %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vv.nxv2f32.iXLen.nxv2f32.iXLen(iXLen 3, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
3499 ret <vscale x 2 x float> %0
3502 declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vv.nxv2f32.iXLen.nxv2f32.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
3504 define <vscale x 4 x float> @test_f_sf_vc_v_vv_e32m2(<vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
3505 ; CHECK-LABEL: test_f_sf_vc_v_vv_e32m2:
3506 ; CHECK: # %bb.0: # %entry
3507 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
3508 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
3511 %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vv.nxv4f32.iXLen.nxv4f32.iXLen(iXLen 3, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
3512 ret <vscale x 4 x float> %0
3515 declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vv.nxv4f32.iXLen.nxv4f32.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
3517 define <vscale x 8 x float> @test_f_sf_vc_v_vv_e32m4(<vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
3518 ; CHECK-LABEL: test_f_sf_vc_v_vv_e32m4:
3519 ; CHECK: # %bb.0: # %entry
3520 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
3521 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
3524 %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vv.nxv8f32.iXLen.nxv8f32.iXLen(iXLen 3, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
3525 ret <vscale x 8 x float> %0
3528 declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vv.nxv8f32.iXLen.nxv8f32.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
3530 define <vscale x 16 x float> @test_f_sf_vc_v_vv_e32m8(<vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl) {
3531 ; CHECK-LABEL: test_f_sf_vc_v_vv_e32m8:
3532 ; CHECK: # %bb.0: # %entry
3533 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
3534 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16
3537 %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vv.nxv16f32.iXLen.nxv16f32.iXLen(iXLen 3, <vscale x 16 x float> %vs2, <vscale x 16 x float> %vs1, iXLen %vl)
3538 ret <vscale x 16 x float> %0
3541 declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vv.nxv16f32.iXLen.nxv16f32.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x float>, iXLen)
3543 define <vscale x 1 x double> @test_f_sf_vc_v_vv_e64m1(<vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl) {
3544 ; CHECK-LABEL: test_f_sf_vc_v_vv_e64m1:
3545 ; CHECK: # %bb.0: # %entry
3546 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
3547 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v9
3550 %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vv.nxv1f64.iXLen.nxv1f64.iXLen(iXLen 3, <vscale x 1 x double> %vs2, <vscale x 1 x double> %vs1, iXLen %vl)
3551 ret <vscale x 1 x double> %0
3554 declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vv.nxv1f64.iXLen.nxv1f64.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x double>, iXLen)
3556 define <vscale x 2 x double> @test_f_sf_vc_v_vv_e64m2(<vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl) {
3557 ; CHECK-LABEL: test_f_sf_vc_v_vv_e64m2:
3558 ; CHECK: # %bb.0: # %entry
3559 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
3560 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v10
3563 %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vv.nxv2f64.iXLen.nxv2f64.iXLen(iXLen 3, <vscale x 2 x double> %vs2, <vscale x 2 x double> %vs1, iXLen %vl)
3564 ret <vscale x 2 x double> %0
3567 declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vv.nxv2f64.iXLen.nxv2f64.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x double>, iXLen)
3569 define <vscale x 4 x double> @test_f_sf_vc_v_vv_e64m4(<vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl) {
3570 ; CHECK-LABEL: test_f_sf_vc_v_vv_e64m4:
3571 ; CHECK: # %bb.0: # %entry
3572 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
3573 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v12
3576 %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vv.nxv4f64.iXLen.nxv4f64.iXLen(iXLen 3, <vscale x 4 x double> %vs2, <vscale x 4 x double> %vs1, iXLen %vl)
3577 ret <vscale x 4 x double> %0
3580 declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vv.nxv4f64.iXLen.nxv4f64.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x double>, iXLen)
3582 define <vscale x 8 x double> @test_f_sf_vc_v_vv_e64m8(<vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl) {
3583 ; CHECK-LABEL: test_f_sf_vc_v_vv_e64m8:
3584 ; CHECK: # %bb.0: # %entry
3585 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
3586 ; CHECK-NEXT: sf.vc.v.vv 3, v8, v8, v16
3589 %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vv.nxv8f64.iXLen.nxv8f64.iXLen(iXLen 3, <vscale x 8 x double> %vs2, <vscale x 8 x double> %vs1, iXLen %vl)
3590 ret <vscale x 8 x double> %0
3593 declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vv.nxv8f64.iXLen.nxv8f64.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x double>, iXLen)
3595 define void @test_f_sf_vc_xv_se_e16mf4(<vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3596 ; CHECK-LABEL: test_f_sf_vc_xv_se_e16mf4:
3597 ; CHECK: # %bb.0: # %entry
3598 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3599 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
3602 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
3606 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f16.i16.iXLen(iXLen, iXLen, <vscale x 1 x half>, i16, iXLen)
3608 define void @test_f_sf_vc_xv_se_e16mf2(<vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3609 ; CHECK-LABEL: test_f_sf_vc_xv_se_e16mf2:
3610 ; CHECK: # %bb.0: # %entry
3611 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3612 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
3615 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
3619 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f16.i16.iXLen(iXLen, iXLen, <vscale x 2 x half>, i16, iXLen)
3621 define void @test_f_sf_vc_xv_se_e16m1(<vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3622 ; CHECK-LABEL: test_f_sf_vc_xv_se_e16m1:
3623 ; CHECK: # %bb.0: # %entry
3624 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3625 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
3628 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
3632 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f16.i16.iXLen(iXLen, iXLen, <vscale x 4 x half>, i16, iXLen)
3634 define void @test_f_sf_vc_xv_se_e16m2(<vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3635 ; CHECK-LABEL: test_f_sf_vc_xv_se_e16m2:
3636 ; CHECK: # %bb.0: # %entry
3637 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3638 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
3641 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
3645 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f16.i16.iXLen(iXLen, iXLen, <vscale x 8 x half>, i16, iXLen)
3647 define void @test_f_sf_vc_xv_se_e16m4(<vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3648 ; CHECK-LABEL: test_f_sf_vc_xv_se_e16m4:
3649 ; CHECK: # %bb.0: # %entry
3650 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
3651 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
3654 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
3658 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f16.i16.iXLen(iXLen, iXLen, <vscale x 16 x half>, i16, iXLen)
3660 define void @test_f_sf_vc_xv_se_e16m8(<vscale x 32 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3661 ; CHECK-LABEL: test_f_sf_vc_xv_se_e16m8:
3662 ; CHECK: # %bb.0: # %entry
3663 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
3664 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
3667 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32f16.i16.iXLen(iXLen 3, iXLen 31, <vscale x 32 x half> %vs2, i16 %rs1, iXLen %vl)
3671 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv32f16.i16.iXLen(iXLen, iXLen, <vscale x 32 x half>, i16, iXLen)
3673 define void @test_f_sf_vc_xv_se_e32mf2(<vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3674 ; CHECK-LABEL: test_f_sf_vc_xv_se_e32mf2:
3675 ; CHECK: # %bb.0: # %entry
3676 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3677 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
3680 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
3684 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv1f32.i32.iXLen(iXLen, iXLen, <vscale x 1 x float>, i32, iXLen)
3686 define void @test_f_sf_vc_xv_se_e32m1(<vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3687 ; CHECK-LABEL: test_f_sf_vc_xv_se_e32m1:
3688 ; CHECK: # %bb.0: # %entry
3689 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
3690 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
3693 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
3697 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv2f32.i32.iXLen(iXLen, iXLen, <vscale x 2 x float>, i32, iXLen)
3699 define void @test_f_sf_vc_xv_se_e32m2(<vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3700 ; CHECK-LABEL: test_f_sf_vc_xv_se_e32m2:
3701 ; CHECK: # %bb.0: # %entry
3702 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
3703 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
3706 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
3710 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv4f32.i32.iXLen(iXLen, iXLen, <vscale x 4 x float>, i32, iXLen)
3712 define void @test_f_sf_vc_xv_se_e32m4(<vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3713 ; CHECK-LABEL: test_f_sf_vc_xv_se_e32m4:
3714 ; CHECK: # %bb.0: # %entry
3715 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
3716 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
3719 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
3723 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv8f32.i32.iXLen(iXLen, iXLen, <vscale x 8 x float>, i32, iXLen)
3725 define void @test_f_sf_vc_xv_se_e32m8(<vscale x 16 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3726 ; CHECK-LABEL: test_f_sf_vc_xv_se_e32m8:
3727 ; CHECK: # %bb.0: # %entry
3728 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
3729 ; CHECK-NEXT: sf.vc.xv 3, 31, v8, a0
3732 tail call void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f32.i32.iXLen(iXLen 3, iXLen 31, <vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl)
3736 declare void @llvm.riscv.sf.vc.xv.se.iXLen.nxv16f32.i32.iXLen(iXLen, iXLen, <vscale x 16 x float>, i32, iXLen)
3738 define <vscale x 1 x half> @test_f_sf_vc_v_xv_se_e16mf4(<vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3739 ; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16mf4:
3740 ; CHECK: # %bb.0: # %entry
3741 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3742 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
3745 %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xv.se.nxv1f16.iXLen.i16.iXLen(iXLen 3, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
3746 ret <vscale x 1 x half> %0
3749 declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.xv.se.nxv1f16.iXLen.i16.iXLen(iXLen, <vscale x 1 x half>, i16, iXLen)
3751 define <vscale x 2 x half> @test_f_sf_vc_v_xv_se_e16mf2(<vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3752 ; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16mf2:
3753 ; CHECK: # %bb.0: # %entry
3754 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3755 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
3758 %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xv.se.nxv2f16.iXLen.i16.iXLen(iXLen 3, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
3759 ret <vscale x 2 x half> %0
3762 declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.xv.se.nxv2f16.iXLen.i16.iXLen(iXLen, <vscale x 2 x half>, i16, iXLen)
3764 define <vscale x 4 x half> @test_f_sf_vc_v_xv_se_e16m1(<vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3765 ; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16m1:
3766 ; CHECK: # %bb.0: # %entry
3767 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3768 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
3771 %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xv.se.nxv4f16.iXLen.i16.iXLen(iXLen 3, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
3772 ret <vscale x 4 x half> %0
3775 declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.xv.se.nxv4f16.iXLen.i16.iXLen(iXLen, <vscale x 4 x half>, i16, iXLen)
3777 define <vscale x 8 x half> @test_f_sf_vc_v_xv_se_e16m2(<vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3778 ; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16m2:
3779 ; CHECK: # %bb.0: # %entry
3780 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3781 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
3784 %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xv.se.nxv8f16.iXLen.i16.iXLen(iXLen 3, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
3785 ret <vscale x 8 x half> %0
3788 declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.xv.se.nxv8f16.iXLen.i16.iXLen(iXLen, <vscale x 8 x half>, i16, iXLen)
3790 define <vscale x 16 x half> @test_f_sf_vc_v_xv_se_e16m4(<vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3791 ; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16m4:
3792 ; CHECK: # %bb.0: # %entry
3793 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
3794 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
3797 %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xv.se.nxv16f16.iXLen.i16.iXLen(iXLen 3, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
3798 ret <vscale x 16 x half> %0
3801 declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.xv.se.nxv16f16.iXLen.i16.iXLen(iXLen, <vscale x 16 x half>, i16, iXLen)
3803 define <vscale x 32 x half> @test_f_sf_vc_v_xv_se_e16m8(<vscale x 32 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3804 ; CHECK-LABEL: test_f_sf_vc_v_xv_se_e16m8:
3805 ; CHECK: # %bb.0: # %entry
3806 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
3807 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
3810 %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xv.se.nxv32f16.iXLen.i16.iXLen(iXLen 3, <vscale x 32 x half> %vs2, i16 %rs1, iXLen %vl)
3811 ret <vscale x 32 x half> %0
3814 declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.xv.se.nxv32f16.iXLen.i16.iXLen(iXLen, <vscale x 32 x half>, i16, iXLen)
3816 define <vscale x 1 x float> @test_f_sf_vc_v_xv_se_e32mf2(<vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3817 ; CHECK-LABEL: test_f_sf_vc_v_xv_se_e32mf2:
3818 ; CHECK: # %bb.0: # %entry
3819 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3820 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
3823 %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xv.se.nxv1f32.iXLen.i32.iXLen(iXLen 3, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
3824 ret <vscale x 1 x float> %0
3827 declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.xv.se.nxv1f32.iXLen.i32.iXLen(iXLen, <vscale x 1 x float>, i32, iXLen)
3829 define <vscale x 2 x float> @test_f_sf_vc_v_xv_se_e32m1(<vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3830 ; CHECK-LABEL: test_f_sf_vc_v_xv_se_e32m1:
3831 ; CHECK: # %bb.0: # %entry
3832 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
3833 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
3836 %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xv.se.nxv2f32.iXLen.i32.iXLen(iXLen 3, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
3837 ret <vscale x 2 x float> %0
3840 declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.xv.se.nxv2f32.iXLen.i32.iXLen(iXLen, <vscale x 2 x float>, i32, iXLen)
3842 define <vscale x 4 x float> @test_f_sf_vc_v_xv_se_e32m2(<vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3843 ; CHECK-LABEL: test_f_sf_vc_v_xv_se_e32m2:
3844 ; CHECK: # %bb.0: # %entry
3845 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
3846 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
3849 %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xv.se.nxv4f32.iXLen.i32.iXLen(iXLen 3, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
3850 ret <vscale x 4 x float> %0
3853 declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.xv.se.nxv4f32.iXLen.i32.iXLen(iXLen, <vscale x 4 x float>, i32, iXLen)
3855 define <vscale x 8 x float> @test_f_sf_vc_v_xv_se_e32m4(<vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3856 ; CHECK-LABEL: test_f_sf_vc_v_xv_se_e32m4:
3857 ; CHECK: # %bb.0: # %entry
3858 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
3859 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
3862 %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xv.se.nxv8f32.iXLen.i32.iXLen(iXLen 3, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
3863 ret <vscale x 8 x float> %0
3866 declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xv.se.nxv8f32.iXLen.i32.iXLen(iXLen, <vscale x 8 x float>, i32, iXLen)
3868 define <vscale x 16 x float> @test_f_sf_vc_v_xv_se_e32m8(<vscale x 16 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3869 ; CHECK-LABEL: test_f_sf_vc_v_xv_se_e32m8:
3870 ; CHECK: # %bb.0: # %entry
3871 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
3872 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
3875 %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xv.se.nxv16f32.iXLen.i32.iXLen(iXLen 3, <vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl)
3876 ret <vscale x 16 x float> %0
3879 declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xv.se.nxv16f32.iXLen.i32.iXLen(iXLen, <vscale x 16 x float>, i32, iXLen)
3881 define <vscale x 1 x half> @test_f_sf_vc_v_xv_e16mf4(<vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3882 ; CHECK-LABEL: test_f_sf_vc_v_xv_e16mf4:
3883 ; CHECK: # %bb.0: # %entry
3884 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3885 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
3888 %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.xv.nxv1f16.iXLen.i16.iXLen(iXLen 3, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
3889 ret <vscale x 1 x half> %0
3892 declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.xv.nxv1f16.iXLen.i16.iXLen(iXLen, <vscale x 1 x half>, i16, iXLen)
3894 define <vscale x 2 x half> @test_f_sf_vc_v_xv_e16mf2(<vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3895 ; CHECK-LABEL: test_f_sf_vc_v_xv_e16mf2:
3896 ; CHECK: # %bb.0: # %entry
3897 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
3898 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
3901 %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.xv.nxv2f16.iXLen.i16.iXLen(iXLen 3, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
3902 ret <vscale x 2 x half> %0
3905 declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.xv.nxv2f16.iXLen.i16.iXLen(iXLen, <vscale x 2 x half>, i16, iXLen)
3907 define <vscale x 4 x half> @test_f_sf_vc_v_xv_e16m1(<vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3908 ; CHECK-LABEL: test_f_sf_vc_v_xv_e16m1:
3909 ; CHECK: # %bb.0: # %entry
3910 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
3911 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
3914 %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.xv.nxv4f16.iXLen.i16.iXLen(iXLen 3, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
3915 ret <vscale x 4 x half> %0
3918 declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.xv.nxv4f16.iXLen.i16.iXLen(iXLen, <vscale x 4 x half>, i16, iXLen)
3920 define <vscale x 8 x half> @test_f_sf_vc_v_xv_e16m2(<vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3921 ; CHECK-LABEL: test_f_sf_vc_v_xv_e16m2:
3922 ; CHECK: # %bb.0: # %entry
3923 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
3924 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
3927 %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.xv.nxv8f16.iXLen.i16.iXLen(iXLen 3, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
3928 ret <vscale x 8 x half> %0
3931 declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.xv.nxv8f16.iXLen.i16.iXLen(iXLen, <vscale x 8 x half>, i16, iXLen)
3933 define <vscale x 16 x half> @test_f_sf_vc_v_xv_e16m4(<vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3934 ; CHECK-LABEL: test_f_sf_vc_v_xv_e16m4:
3935 ; CHECK: # %bb.0: # %entry
3936 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
3937 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
3940 %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.xv.nxv16f16.iXLen.i16.iXLen(iXLen 3, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
3941 ret <vscale x 16 x half> %0
3944 declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.xv.nxv16f16.iXLen.i16.iXLen(iXLen, <vscale x 16 x half>, i16, iXLen)
3946 define <vscale x 32 x half> @test_f_sf_vc_v_xv_e16m8(<vscale x 32 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
3947 ; CHECK-LABEL: test_f_sf_vc_v_xv_e16m8:
3948 ; CHECK: # %bb.0: # %entry
3949 ; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
3950 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
3953 %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.xv.nxv32f16.iXLen.i16.iXLen(iXLen 3, <vscale x 32 x half> %vs2, i16 %rs1, iXLen %vl)
3954 ret <vscale x 32 x half> %0
3957 declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.xv.nxv32f16.iXLen.i16.iXLen(iXLen, <vscale x 32 x half>, i16, iXLen)
3959 define <vscale x 1 x float> @test_f_sf_vc_v_xv_e32mf2(<vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3960 ; CHECK-LABEL: test_f_sf_vc_v_xv_e32mf2:
3961 ; CHECK: # %bb.0: # %entry
3962 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3963 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
3966 %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xv.nxv1f32.i32.f32.iXLen(iXLen 3, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
3967 ret <vscale x 1 x float> %0
3970 declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.xv.nxv1f32.i32.f32.iXLen(iXLen, <vscale x 1 x float>, i32, iXLen)
3972 define <vscale x 2 x float> @test_f_sf_vc_v_xv_e32m1(<vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3973 ; CHECK-LABEL: test_f_sf_vc_v_xv_e32m1:
3974 ; CHECK: # %bb.0: # %entry
3975 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
3976 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
3979 %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xv.nxv2f32.i32.f32.iXLen(iXLen 3, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
3980 ret <vscale x 2 x float> %0
3983 declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.xv.nxv2f32.i32.f32.iXLen(iXLen, <vscale x 2 x float>, i32, iXLen)
3985 define <vscale x 4 x float> @test_f_sf_vc_v_xv_e32m2(<vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3986 ; CHECK-LABEL: test_f_sf_vc_v_xv_e32m2:
3987 ; CHECK: # %bb.0: # %entry
3988 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
3989 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
3992 %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xv.nxv4f32.i32.f32.iXLen(iXLen 3, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
3993 ret <vscale x 4 x float> %0
3996 declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.xv.nxv4f32.i32.f32.iXLen(iXLen, <vscale x 4 x float>, i32, iXLen)
3998 define <vscale x 8 x float> @test_f_sf_vc_v_xv_e32m4(<vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
3999 ; CHECK-LABEL: test_f_sf_vc_v_xv_e32m4:
4000 ; CHECK: # %bb.0: # %entry
4001 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
4002 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
4005 %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xv.nxv8f32.i32.f32.iXLen(iXLen 3, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
4006 ret <vscale x 8 x float> %0
4009 declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xv.nxv8f32.i32.f32.iXLen(iXLen, <vscale x 8 x float>, i32, iXLen)
4011 define <vscale x 16 x float> @test_f_sf_vc_v_xv_e32m8(<vscale x 16 x float> %vs2, i32 signext %rs1, iXLen %vl) {
4012 ; CHECK-LABEL: test_f_sf_vc_v_xv_e32m8:
4013 ; CHECK: # %bb.0: # %entry
4014 ; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
4015 ; CHECK-NEXT: sf.vc.v.xv 3, v8, v8, a0
4018 %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xv.nxv16f32.i32.f32.iXLen(iXLen 3, <vscale x 16 x float> %vs2, i32 %rs1, iXLen %vl)
4019 ret <vscale x 16 x float> %0
4022 declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xv.nxv16f32.i32.f32.iXLen(iXLen, <vscale x 16 x float>, i32, iXLen)
4024 define void @test_f_sf_vc_iv_se_e16mf4(<vscale x 1 x half> %vs2, iXLen %vl) {
4025 ; CHECK-LABEL: test_f_sf_vc_iv_se_e16mf4:
4026 ; CHECK: # %bb.0: # %entry
4027 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
4028 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
4031 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
4035 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f16.iXLen.iXLen(iXLen, iXLen, <vscale x 1 x half>, iXLen, iXLen)
4037 define void @test_f_sf_vc_iv_se_e16mf2(<vscale x 2 x half> %vs2, iXLen %vl) {
4038 ; CHECK-LABEL: test_f_sf_vc_iv_se_e16mf2:
4039 ; CHECK: # %bb.0: # %entry
4040 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
4041 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
4044 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
4048 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f16.iXLen.iXLen(iXLen, iXLen, <vscale x 2 x half>, iXLen, iXLen)
4050 define void @test_f_sf_vc_iv_se_e16m1(<vscale x 4 x half> %vs2, iXLen %vl) {
4051 ; CHECK-LABEL: test_f_sf_vc_iv_se_e16m1:
4052 ; CHECK: # %bb.0: # %entry
4053 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
4054 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
4057 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
4061 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f16.iXLen.iXLen(iXLen, iXLen, <vscale x 4 x half>, iXLen, iXLen)
4063 define void @test_f_sf_vc_iv_se_e16m2(<vscale x 8 x half> %vs2, iXLen %vl) {
4064 ; CHECK-LABEL: test_f_sf_vc_iv_se_e16m2:
4065 ; CHECK: # %bb.0: # %entry
4066 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
4067 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
4070 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
4074 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f16.iXLen.iXLen(iXLen, iXLen, <vscale x 8 x half>, iXLen, iXLen)
4076 define void @test_f_sf_vc_iv_se_e16m4(<vscale x 16 x half> %vs2, iXLen %vl) {
4077 ; CHECK-LABEL: test_f_sf_vc_iv_se_e16m4:
4078 ; CHECK: # %bb.0: # %entry
4079 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
4080 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
4083 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
4087 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f16.iXLen.iXLen(iXLen, iXLen, <vscale x 16 x half>, iXLen, iXLen)
4089 define void @test_f_sf_vc_iv_se_e16m8(<vscale x 32 x half> %vs2, iXLen %vl) {
4090 ; CHECK-LABEL: test_f_sf_vc_iv_se_e16m8:
4091 ; CHECK: # %bb.0: # %entry
4092 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
4093 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
4096 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32f16.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 32 x half> %vs2, iXLen 10, iXLen %vl)
4100 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv32f16.iXLen.iXLen(iXLen, iXLen, <vscale x 32 x half>, iXLen, iXLen)
4102 define void @test_f_sf_vc_iv_se_e32mf2(<vscale x 1 x float> %vs2, iXLen %vl) {
4103 ; CHECK-LABEL: test_f_sf_vc_iv_se_e32mf2:
4104 ; CHECK: # %bb.0: # %entry
4105 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
4106 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
4109 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
4113 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f32.iXLen.iXLen(iXLen, iXLen, <vscale x 1 x float>, iXLen, iXLen)
4115 define void @test_f_sf_vc_iv_se_e32m1(<vscale x 2 x float> %vs2, iXLen %vl) {
4116 ; CHECK-LABEL: test_f_sf_vc_iv_se_e32m1:
4117 ; CHECK: # %bb.0: # %entry
4118 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
4119 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
4122 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
4126 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f32.iXLen.iXLen(iXLen, iXLen, <vscale x 2 x float>, iXLen, iXLen)
4128 define void @test_f_sf_vc_iv_se_e32m2(<vscale x 4 x float> %vs2, iXLen %vl) {
4129 ; CHECK-LABEL: test_f_sf_vc_iv_se_e32m2:
4130 ; CHECK: # %bb.0: # %entry
4131 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
4132 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
4135 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
4139 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f32.iXLen.iXLen(iXLen, iXLen, <vscale x 4 x float>, iXLen, iXLen)
4141 define void @test_f_sf_vc_iv_se_e32m4(<vscale x 8 x float> %vs2, iXLen %vl) {
4142 ; CHECK-LABEL: test_f_sf_vc_iv_se_e32m4:
4143 ; CHECK: # %bb.0: # %entry
4144 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
4145 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
4148 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
4152 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f32.iXLen.iXLen(iXLen, iXLen, <vscale x 8 x float>, iXLen, iXLen)
4154 define void @test_f_sf_vc_iv_se_e32m8(<vscale x 16 x float> %vs2, iXLen %vl) {
4155 ; CHECK-LABEL: test_f_sf_vc_iv_se_e32m8:
4156 ; CHECK: # %bb.0: # %entry
4157 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
4158 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
4161 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f32.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 16 x float> %vs2, iXLen 10, iXLen %vl)
4165 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv16f32.iXLen.iXLen(iXLen, iXLen, <vscale x 16 x float>, iXLen, iXLen)
4167 define void @test_f_sf_vc_iv_se_e64m1(<vscale x 1 x double> %vs2, iXLen %vl) {
4168 ; CHECK-LABEL: test_f_sf_vc_iv_se_e64m1:
4169 ; CHECK: # %bb.0: # %entry
4170 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
4171 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
4174 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f64.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 1 x double> %vs2, iXLen 10, iXLen %vl)
4178 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv1f64.iXLen.iXLen(iXLen, iXLen, <vscale x 1 x double>, iXLen, iXLen)
4180 define void @test_f_sf_vc_iv_se_e64m2(<vscale x 2 x double> %vs2, iXLen %vl) {
4181 ; CHECK-LABEL: test_f_sf_vc_iv_se_e64m2:
4182 ; CHECK: # %bb.0: # %entry
4183 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
4184 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
4187 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f64.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 2 x double> %vs2, iXLen 10, iXLen %vl)
4191 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv2f64.iXLen.iXLen(iXLen, iXLen, <vscale x 2 x double>, iXLen, iXLen)
4193 define void @test_f_sf_vc_iv_se_e64m4(<vscale x 4 x double> %vs2, iXLen %vl) {
4194 ; CHECK-LABEL: test_f_sf_vc_iv_se_e64m4:
4195 ; CHECK: # %bb.0: # %entry
4196 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
4197 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
4200 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f64.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 4 x double> %vs2, iXLen 10, iXLen %vl)
4204 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv4f64.iXLen.iXLen(iXLen, iXLen, <vscale x 4 x double>, iXLen, iXLen)
4206 define void @test_f_sf_vc_iv_se_e64m8(<vscale x 8 x double> %vs2, iXLen %vl) {
4207 ; CHECK-LABEL: test_f_sf_vc_iv_se_e64m8:
4208 ; CHECK: # %bb.0: # %entry
4209 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
4210 ; CHECK-NEXT: sf.vc.iv 3, 31, v8, 10
4213 tail call void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f64.iXLen.iXLen(iXLen 3, iXLen 31, <vscale x 8 x double> %vs2, iXLen 10, iXLen %vl)
4217 declare void @llvm.riscv.sf.vc.iv.se.iXLen.nxv8f64.iXLen.iXLen(iXLen, iXLen, <vscale x 8 x double>, iXLen, iXLen)
4219 define <vscale x 1 x half> @test_f_sf_vc_v_iv_se_e16mf4(<vscale x 1 x half> %vs2, iXLen %vl) {
4220 ; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16mf4:
4221 ; CHECK: # %bb.0: # %entry
4222 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
4223 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4226 %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.iv.se.nxv1f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
4227 ret <vscale x 1 x half> %0
4230 declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.iv.se.nxv1f16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x half>, iXLen, iXLen)
4232 define <vscale x 2 x half> @test_f_sf_vc_v_iv_se_e16mf2(<vscale x 2 x half> %vs2, iXLen %vl) {
4233 ; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16mf2:
4234 ; CHECK: # %bb.0: # %entry
4235 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
4236 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4239 %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.iv.se.nxv2f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
4240 ret <vscale x 2 x half> %0
4243 declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.iv.se.nxv2f16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x half>, iXLen, iXLen)
4245 define <vscale x 4 x half> @test_f_sf_vc_v_iv_se_e16m1(<vscale x 4 x half> %vs2, iXLen %vl) {
4246 ; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16m1:
4247 ; CHECK: # %bb.0: # %entry
4248 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
4249 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4252 %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.iv.se.nxv4f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
4253 ret <vscale x 4 x half> %0
4256 declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.iv.se.nxv4f16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x half>, iXLen, iXLen)
4258 define <vscale x 8 x half> @test_f_sf_vc_v_iv_se_e16m2(<vscale x 8 x half> %vs2, iXLen %vl) {
4259 ; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16m2:
4260 ; CHECK: # %bb.0: # %entry
4261 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
4262 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4265 %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.iv.se.nxv8f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
4266 ret <vscale x 8 x half> %0
4269 declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.iv.se.nxv8f16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x half>, iXLen, iXLen)
4271 define <vscale x 16 x half> @test_f_sf_vc_v_iv_se_e16m4(<vscale x 16 x half> %vs2, iXLen %vl) {
4272 ; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16m4:
4273 ; CHECK: # %bb.0: # %entry
4274 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
4275 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4278 %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.iv.se.nxv16f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
4279 ret <vscale x 16 x half> %0
4282 declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.iv.se.nxv16f16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x half>, iXLen, iXLen)
4284 define <vscale x 32 x half> @test_f_sf_vc_v_iv_se_e16m8(<vscale x 32 x half> %vs2, iXLen %vl) {
4285 ; CHECK-LABEL: test_f_sf_vc_v_iv_se_e16m8:
4286 ; CHECK: # %bb.0: # %entry
4287 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
4288 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4291 %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.iv.se.nxv32f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x half> %vs2, iXLen 10, iXLen %vl)
4292 ret <vscale x 32 x half> %0
4295 declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.iv.se.nxv32f16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x half>, iXLen, iXLen)
4297 define <vscale x 1 x float> @test_f_sf_vc_v_iv_se_e32mf2(<vscale x 1 x float> %vs2, iXLen %vl) {
4298 ; CHECK-LABEL: test_f_sf_vc_v_iv_se_e32mf2:
4299 ; CHECK: # %bb.0: # %entry
4300 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
4301 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4304 %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.iv.se.nxv1f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
4305 ret <vscale x 1 x float> %0
4308 declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.iv.se.nxv1f32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x float>, iXLen, iXLen)
4310 define <vscale x 2 x float> @test_f_sf_vc_v_iv_se_e32m1(<vscale x 2 x float> %vs2, iXLen %vl) {
4311 ; CHECK-LABEL: test_f_sf_vc_v_iv_se_e32m1:
4312 ; CHECK: # %bb.0: # %entry
4313 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
4314 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4317 %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.iv.se.nxv2f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
4318 ret <vscale x 2 x float> %0
4321 declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.iv.se.nxv2f32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x float>, iXLen, iXLen)
4323 define <vscale x 4 x float> @test_f_sf_vc_v_iv_se_e32m2(<vscale x 4 x float> %vs2, iXLen %vl) {
4324 ; CHECK-LABEL: test_f_sf_vc_v_iv_se_e32m2:
4325 ; CHECK: # %bb.0: # %entry
4326 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
4327 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4330 %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.iv.se.nxv4f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
4331 ret <vscale x 4 x float> %0
4334 declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.iv.se.nxv4f32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x float>, iXLen, iXLen)
4336 define <vscale x 8 x float> @test_f_sf_vc_v_iv_se_e32m4(<vscale x 8 x float> %vs2, iXLen %vl) {
4337 ; CHECK-LABEL: test_f_sf_vc_v_iv_se_e32m4:
4338 ; CHECK: # %bb.0: # %entry
4339 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
4340 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4343 %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.iv.se.nxv8f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
4344 ret <vscale x 8 x float> %0
4347 declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.iv.se.nxv8f32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x float>, iXLen, iXLen)
4349 define <vscale x 16 x float> @test_f_sf_vc_v_iv_se_e32m8(<vscale x 16 x float> %vs2, iXLen %vl) {
4350 ; CHECK-LABEL: test_f_sf_vc_v_iv_se_e32m8:
4351 ; CHECK: # %bb.0: # %entry
4352 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
4353 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4356 %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.iv.se.nxv16f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vs2, iXLen 10, iXLen %vl)
4357 ret <vscale x 16 x float> %0
4360 declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.iv.se.nxv16f32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x float>, iXLen, iXLen)
4362 define <vscale x 1 x double> @test_f_sf_vc_v_iv_se_e64m1(<vscale x 1 x double> %vs2, iXLen %vl) {
4363 ; CHECK-LABEL: test_f_sf_vc_v_iv_se_e64m1:
4364 ; CHECK: # %bb.0: # %entry
4365 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
4366 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4369 %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.iv.se.nxv1f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vs2, iXLen 10, iXLen %vl)
4370 ret <vscale x 1 x double> %0
4373 declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.iv.se.nxv1f64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x double>, iXLen, iXLen)
4375 define <vscale x 2 x double> @test_f_sf_vc_v_iv_se_e64m2(<vscale x 2 x double> %vs2, iXLen %vl) {
4376 ; CHECK-LABEL: test_f_sf_vc_v_iv_se_e64m2:
4377 ; CHECK: # %bb.0: # %entry
4378 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
4379 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4382 %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.iv.se.nxv2f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vs2, iXLen 10, iXLen %vl)
4383 ret <vscale x 2 x double> %0
4386 declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.iv.se.nxv2f64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x double>, iXLen, iXLen)
4388 define <vscale x 4 x double> @test_f_sf_vc_v_iv_se_e64m4(<vscale x 4 x double> %vs2, iXLen %vl) {
4389 ; CHECK-LABEL: test_f_sf_vc_v_iv_se_e64m4:
4390 ; CHECK: # %bb.0: # %entry
4391 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
4392 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4395 %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.iv.se.nxv4f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vs2, iXLen 10, iXLen %vl)
4396 ret <vscale x 4 x double> %0
4399 declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.iv.se.nxv4f64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x double>, iXLen, iXLen)
4401 define <vscale x 8 x double> @test_f_sf_vc_v_iv_se_e64m8(<vscale x 8 x double> %vs2, iXLen %vl) {
4402 ; CHECK-LABEL: test_f_sf_vc_v_iv_se_e64m8:
4403 ; CHECK: # %bb.0: # %entry
4404 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
4405 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4408 %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.iv.se.nxv8f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vs2, iXLen 10, iXLen %vl)
4409 ret <vscale x 8 x double> %0
4412 declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.iv.se.nxv8f64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x double>, iXLen, iXLen)
4414 define <vscale x 1 x half> @test_f_sf_vc_v_iv_e16mf4(<vscale x 1 x half> %vs2, iXLen %vl) {
4415 ; CHECK-LABEL: test_f_sf_vc_v_iv_e16mf4:
4416 ; CHECK: # %bb.0: # %entry
4417 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
4418 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4421 %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.iv.nxv1f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
4422 ret <vscale x 1 x half> %0
4425 declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.iv.nxv1f16.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x half>, iXLen, iXLen)
4427 define <vscale x 2 x half> @test_f_sf_vc_v_iv_e16mf2(<vscale x 2 x half> %vs2, iXLen %vl) {
4428 ; CHECK-LABEL: test_f_sf_vc_v_iv_e16mf2:
4429 ; CHECK: # %bb.0: # %entry
4430 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
4431 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4434 %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.iv.nxv2f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
4435 ret <vscale x 2 x half> %0
4438 declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.iv.nxv2f16.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x half>, iXLen, iXLen)
4440 define <vscale x 4 x half> @test_f_sf_vc_v_iv_e16m1(<vscale x 4 x half> %vs2, iXLen %vl) {
4441 ; CHECK-LABEL: test_f_sf_vc_v_iv_e16m1:
4442 ; CHECK: # %bb.0: # %entry
4443 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
4444 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4447 %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.iv.nxv4f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
4448 ret <vscale x 4 x half> %0
4451 declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.iv.nxv4f16.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x half>, iXLen, iXLen)
4453 define <vscale x 8 x half> @test_f_sf_vc_v_iv_e16m2(<vscale x 8 x half> %vs2, iXLen %vl) {
4454 ; CHECK-LABEL: test_f_sf_vc_v_iv_e16m2:
4455 ; CHECK: # %bb.0: # %entry
4456 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
4457 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4460 %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.iv.nxv8f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
4461 ret <vscale x 8 x half> %0
4464 declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.iv.nxv8f16.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x half>, iXLen, iXLen)
4466 define <vscale x 16 x half> @test_f_sf_vc_v_iv_e16m4(<vscale x 16 x half> %vs2, iXLen %vl) {
4467 ; CHECK-LABEL: test_f_sf_vc_v_iv_e16m4:
4468 ; CHECK: # %bb.0: # %entry
4469 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
4470 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4473 %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.iv.nxv16f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
4474 ret <vscale x 16 x half> %0
4477 declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.iv.nxv16f16.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x half>, iXLen, iXLen)
4479 define <vscale x 32 x half> @test_f_sf_vc_v_iv_e16m8(<vscale x 32 x half> %vs2, iXLen %vl) {
4480 ; CHECK-LABEL: test_f_sf_vc_v_iv_e16m8:
4481 ; CHECK: # %bb.0: # %entry
4482 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
4483 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4486 %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.iv.nxv32f16.iXLen.iXLen.iXLen(iXLen 3, <vscale x 32 x half> %vs2, iXLen 10, iXLen %vl)
4487 ret <vscale x 32 x half> %0
4490 declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.iv.nxv32f16.iXLen.iXLen.iXLen(iXLen, <vscale x 32 x half>, iXLen, iXLen)
4492 define <vscale x 1 x float> @test_f_sf_vc_v_iv_e32mf2(<vscale x 1 x float> %vs2, iXLen %vl) {
4493 ; CHECK-LABEL: test_f_sf_vc_v_iv_e32mf2:
4494 ; CHECK: # %bb.0: # %entry
4495 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
4496 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4499 %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.iv.nxv1f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
4500 ret <vscale x 1 x float> %0
4503 declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.iv.nxv1f32.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x float>, iXLen, iXLen)
4505 define <vscale x 2 x float> @test_f_sf_vc_v_iv_e32m1(<vscale x 2 x float> %vs2, iXLen %vl) {
4506 ; CHECK-LABEL: test_f_sf_vc_v_iv_e32m1:
4507 ; CHECK: # %bb.0: # %entry
4508 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
4509 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4512 %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.iv.nxv2f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
4513 ret <vscale x 2 x float> %0
4516 declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.iv.nxv2f32.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x float>, iXLen, iXLen)
4518 define <vscale x 4 x float> @test_f_sf_vc_v_iv_e32m2(<vscale x 4 x float> %vs2, iXLen %vl) {
4519 ; CHECK-LABEL: test_f_sf_vc_v_iv_e32m2:
4520 ; CHECK: # %bb.0: # %entry
4521 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
4522 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4525 %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.iv.nxv4f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
4526 ret <vscale x 4 x float> %0
4529 declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.iv.nxv4f32.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x float>, iXLen, iXLen)
4531 define <vscale x 8 x float> @test_f_sf_vc_v_iv_e32m4(<vscale x 8 x float> %vs2, iXLen %vl) {
4532 ; CHECK-LABEL: test_f_sf_vc_v_iv_e32m4:
4533 ; CHECK: # %bb.0: # %entry
4534 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
4535 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4538 %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.iv.nxv8f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
4539 ret <vscale x 8 x float> %0
4542 declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.iv.nxv8f32.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x float>, iXLen, iXLen)
4544 define <vscale x 16 x float> @test_f_sf_vc_v_iv_e32m8(<vscale x 16 x float> %vs2, iXLen %vl) {
4545 ; CHECK-LABEL: test_f_sf_vc_v_iv_e32m8:
4546 ; CHECK: # %bb.0: # %entry
4547 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
4548 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4551 %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.iv.nxv16f32.iXLen.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vs2, iXLen 10, iXLen %vl)
4552 ret <vscale x 16 x float> %0
4555 declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.iv.nxv16f32.iXLen.iXLen.iXLen(iXLen, <vscale x 16 x float>, iXLen, iXLen)
4557 define <vscale x 1 x double> @test_f_sf_vc_v_iv_e64m1(<vscale x 1 x double> %vs2, iXLen %vl) {
4558 ; CHECK-LABEL: test_f_sf_vc_v_iv_e64m1:
4559 ; CHECK: # %bb.0: # %entry
4560 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
4561 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4564 %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.iv.nxv1f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vs2, iXLen 10, iXLen %vl)
4565 ret <vscale x 1 x double> %0
4568 declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.iv.nxv1f64.iXLen.iXLen.iXLen(iXLen, <vscale x 1 x double>, iXLen, iXLen)
4570 define <vscale x 2 x double> @test_f_sf_vc_v_iv_e64m2(<vscale x 2 x double> %vs2, iXLen %vl) {
4571 ; CHECK-LABEL: test_f_sf_vc_v_iv_e64m2:
4572 ; CHECK: # %bb.0: # %entry
4573 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
4574 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4577 %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.iv.nxv2f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vs2, iXLen 10, iXLen %vl)
4578 ret <vscale x 2 x double> %0
4581 declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.iv.nxv2f64.iXLen.iXLen.iXLen(iXLen, <vscale x 2 x double>, iXLen, iXLen)
4583 define <vscale x 4 x double> @test_f_sf_vc_v_iv_e64m4(<vscale x 4 x double> %vs2, iXLen %vl) {
4584 ; CHECK-LABEL: test_f_sf_vc_v_iv_e64m4:
4585 ; CHECK: # %bb.0: # %entry
4586 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
4587 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4590 %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.iv.nxv4f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vs2, iXLen 10, iXLen %vl)
4591 ret <vscale x 4 x double> %0
4594 declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.iv.nxv4f64.iXLen.iXLen.iXLen(iXLen, <vscale x 4 x double>, iXLen, iXLen)
4596 define <vscale x 8 x double> @test_f_sf_vc_v_iv_e64m8(<vscale x 8 x double> %vs2, iXLen %vl) {
4597 ; CHECK-LABEL: test_f_sf_vc_v_iv_e64m8:
4598 ; CHECK: # %bb.0: # %entry
4599 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
4600 ; CHECK-NEXT: sf.vc.v.iv 3, v8, v8, 10
4603 %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.iv.nxv8f64.iXLen.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vs2, iXLen 10, iXLen %vl)
4604 ret <vscale x 8 x double> %0
4607 declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.iv.nxv8f64.iXLen.iXLen.iXLen(iXLen, <vscale x 8 x double>, iXLen, iXLen)
4609 define void @test_f_sf_vc_fv_se_e16mf4(<vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
4610 ; CHECK-LABEL: test_f_sf_vc_fv_se_e16mf4:
4611 ; CHECK: # %bb.0: # %entry
4612 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
4613 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
4616 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
4620 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f16.f16.iXLen(iXLen, iXLen, <vscale x 1 x half>, half, iXLen)
4622 define void @test_f_sf_vc_fv_se_e16mf2(<vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
4623 ; CHECK-LABEL: test_f_sf_vc_fv_se_e16mf2:
4624 ; CHECK: # %bb.0: # %entry
4625 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
4626 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
4629 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
4633 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f16.f16.iXLen(iXLen, iXLen, <vscale x 2 x half>, half, iXLen)
4635 define void @test_f_sf_vc_fv_se_e16m1(<vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
4636 ; CHECK-LABEL: test_f_sf_vc_fv_se_e16m1:
4637 ; CHECK: # %bb.0: # %entry
4638 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
4639 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
4642 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
4646 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f16.f16.iXLen(iXLen, iXLen, <vscale x 4 x half>, half, iXLen)
4648 define void @test_f_sf_vc_fv_se_e16m2(<vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
4649 ; CHECK-LABEL: test_f_sf_vc_fv_se_e16m2:
4650 ; CHECK: # %bb.0: # %entry
4651 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
4652 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
4655 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
4659 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f16.f16.iXLen(iXLen, iXLen, <vscale x 8 x half>, half, iXLen)
4661 define void @test_f_sf_vc_fv_se_e16m4(<vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
4662 ; CHECK-LABEL: test_f_sf_vc_fv_se_e16m4:
4663 ; CHECK: # %bb.0: # %entry
4664 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
4665 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
4668 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
4672 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f16.f16.iXLen(iXLen, iXLen, <vscale x 16 x half>, half, iXLen)
4674 define void @test_f_sf_vc_fv_se_e16m8(<vscale x 32 x half> %vs2, half %fs1, iXLen %vl) {
4675 ; CHECK-LABEL: test_f_sf_vc_fv_se_e16m8:
4676 ; CHECK: # %bb.0: # %entry
4677 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
4678 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
4681 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32f16.f16.iXLen(iXLen 1, iXLen 31, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl)
4685 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv32f16.f16.iXLen(iXLen, iXLen, <vscale x 32 x half>, half, iXLen)
4687 define void @test_f_sf_vc_fv_se_e32mf2(<vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
4688 ; CHECK-LABEL: test_f_sf_vc_fv_se_e32mf2:
4689 ; CHECK: # %bb.0: # %entry
4690 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
4691 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
4694 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
4698 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f32.f32.iXLen(iXLen, iXLen, <vscale x 1 x float>, float, iXLen)
4700 define void @test_f_sf_vc_fv_se_e32m1(<vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
4701 ; CHECK-LABEL: test_f_sf_vc_fv_se_e32m1:
4702 ; CHECK: # %bb.0: # %entry
4703 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
4704 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
4707 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
4711 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f32.f32.iXLen(iXLen, iXLen, <vscale x 2 x float>, float, iXLen)
4713 define void @test_f_sf_vc_fv_se_e32m2(<vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
4714 ; CHECK-LABEL: test_f_sf_vc_fv_se_e32m2:
4715 ; CHECK: # %bb.0: # %entry
4716 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
4717 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
4720 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
4724 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f32.f32.iXLen(iXLen, iXLen, <vscale x 4 x float>, float, iXLen)
4726 define void @test_f_sf_vc_fv_se_e32m4(<vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
4727 ; CHECK-LABEL: test_f_sf_vc_fv_se_e32m4:
4728 ; CHECK: # %bb.0: # %entry
4729 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
4730 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
4733 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
4737 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f32.f32.iXLen(iXLen, iXLen, <vscale x 8 x float>, float, iXLen)
4739 define void @test_f_sf_vc_fv_se_e32m8(<vscale x 16 x float> %vs2, float %fs1, iXLen %vl) {
4740 ; CHECK-LABEL: test_f_sf_vc_fv_se_e32m8:
4741 ; CHECK: # %bb.0: # %entry
4742 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
4743 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
4746 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f32.f32.iXLen(iXLen 1, iXLen 31, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl)
4750 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv16f32.f32.iXLen(iXLen, iXLen, <vscale x 16 x float>, float, iXLen)
4752 define void @test_f_sf_vc_fv_se_e64m1(<vscale x 1 x double> %vs2, double %fs1, iXLen %vl) {
4753 ; CHECK-LABEL: test_f_sf_vc_fv_se_e64m1:
4754 ; CHECK: # %bb.0: # %entry
4755 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
4756 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
4759 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f64.f64.iXLen(iXLen 1, iXLen 31, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl)
4763 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv1f64.f64.iXLen(iXLen, iXLen, <vscale x 1 x double>, double, iXLen)
4765 define void @test_f_sf_vc_fv_se_e64m2(<vscale x 2 x double> %vs2, double %fs1, iXLen %vl) {
4766 ; CHECK-LABEL: test_f_sf_vc_fv_se_e64m2:
4767 ; CHECK: # %bb.0: # %entry
4768 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
4769 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
4772 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f64.f64.iXLen(iXLen 1, iXLen 31, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl)
4776 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv2f64.f64.iXLen(iXLen, iXLen, <vscale x 2 x double>, double, iXLen)
4778 define void @test_f_sf_vc_fv_se_e64m4(<vscale x 4 x double> %vs2, double %fs1, iXLen %vl) {
4779 ; CHECK-LABEL: test_f_sf_vc_fv_se_e64m4:
4780 ; CHECK: # %bb.0: # %entry
4781 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
4782 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
4785 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f64.f64.iXLen(iXLen 1, iXLen 31, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl)
4789 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv4f64.f64.iXLen(iXLen, iXLen, <vscale x 4 x double>, double, iXLen)
4791 define void @test_f_sf_vc_fv_se_e64m8(<vscale x 8 x double> %vs2, double %fs1, iXLen %vl) {
4792 ; CHECK-LABEL: test_f_sf_vc_fv_se_e64m8:
4793 ; CHECK: # %bb.0: # %entry
4794 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
4795 ; CHECK-NEXT: sf.vc.fv 1, 31, v8, fa0
4798 tail call void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f64.f64.iXLen(iXLen 1, iXLen 31, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl)
4802 declare void @llvm.riscv.sf.vc.fv.se.iXLen.nxv8f64.f64.iXLen(iXLen, iXLen, <vscale x 8 x double>, double, iXLen)
4804 define <vscale x 1 x half> @test_f_sf_vc_v_fv_se_e16mf4(<vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
4805 ; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16mf4:
4806 ; CHECK: # %bb.0: # %entry
4807 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
4808 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
4811 %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fv.se.nxv1f16.iXLen.f16.iXLen(iXLen 1, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
4812 ret <vscale x 1 x half> %0
4815 declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.fv.se.nxv1f16.iXLen.f16.iXLen(iXLen, <vscale x 1 x half>, half, iXLen)
4817 define <vscale x 2 x half> @test_f_sf_vc_v_fv_se_e16mf2(<vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
4818 ; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16mf2:
4819 ; CHECK: # %bb.0: # %entry
4820 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
4821 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
4824 %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fv.se.nxv2f16.iXLen.f16.iXLen(iXLen 1, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
4825 ret <vscale x 2 x half> %0
4828 declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.fv.se.nxv2f16.iXLen.f16.iXLen(iXLen, <vscale x 2 x half>, half, iXLen)
4830 define <vscale x 4 x half> @test_f_sf_vc_v_fv_se_e16m1(<vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
4831 ; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16m1:
4832 ; CHECK: # %bb.0: # %entry
4833 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
4834 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
4837 %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fv.se.nxv4f16.iXLen.f16.iXLen(iXLen 1, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
4838 ret <vscale x 4 x half> %0
4841 declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.fv.se.nxv4f16.iXLen.f16.iXLen(iXLen, <vscale x 4 x half>, half, iXLen)
4843 define <vscale x 8 x half> @test_f_sf_vc_v_fv_se_e16m2(<vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
4844 ; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16m2:
4845 ; CHECK: # %bb.0: # %entry
4846 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
4847 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
4850 %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fv.se.nxv8f16.iXLen.f16.iXLen(iXLen 1, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
4851 ret <vscale x 8 x half> %0
4854 declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.fv.se.nxv8f16.iXLen.f16.iXLen(iXLen, <vscale x 8 x half>, half, iXLen)
4856 define <vscale x 16 x half> @test_f_sf_vc_v_fv_se_e16m4(<vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
4857 ; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16m4:
4858 ; CHECK: # %bb.0: # %entry
4859 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
4860 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
4863 %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fv.se.nxv16f16.iXLen.f16.iXLen(iXLen 1, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
4864 ret <vscale x 16 x half> %0
4867 declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.fv.se.nxv16f16.iXLen.f16.iXLen(iXLen, <vscale x 16 x half>, half, iXLen)
4869 define <vscale x 32 x half> @test_f_sf_vc_v_fv_se_e16m8(<vscale x 32 x half> %vs2, half %fs1, iXLen %vl) {
4870 ; CHECK-LABEL: test_f_sf_vc_v_fv_se_e16m8:
4871 ; CHECK: # %bb.0: # %entry
4872 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
4873 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
4876 %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fv.se.nxv32f16.iXLen.f16.iXLen(iXLen 1, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl)
4877 ret <vscale x 32 x half> %0
4880 declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.fv.se.nxv32f16.iXLen.f16.iXLen(iXLen, <vscale x 32 x half>, half, iXLen)
4882 define <vscale x 1 x float> @test_f_sf_vc_v_fv_se_e32mf2(<vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
4883 ; CHECK-LABEL: test_f_sf_vc_v_fv_se_e32mf2:
4884 ; CHECK: # %bb.0: # %entry
4885 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
4886 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
4889 %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fv.se.nxv1f32.iXLen.f32.iXLen(iXLen 1, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
4890 ret <vscale x 1 x float> %0
4893 declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fv.se.nxv1f32.iXLen.f32.iXLen(iXLen, <vscale x 1 x float>, float, iXLen)
4895 define <vscale x 2 x float> @test_f_sf_vc_v_fv_se_e32m1(<vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
4896 ; CHECK-LABEL: test_f_sf_vc_v_fv_se_e32m1:
4897 ; CHECK: # %bb.0: # %entry
4898 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
4899 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
4902 %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.se.nxv2f32.iXLen.f32.iXLen(iXLen 1, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
4903 ret <vscale x 2 x float> %0
4906 declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.se.nxv2f32.iXLen.f32.iXLen(iXLen, <vscale x 2 x float>, float, iXLen)
4908 define <vscale x 4 x float> @test_f_sf_vc_v_fv_se_e32m2(<vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
4909 ; CHECK-LABEL: test_f_sf_vc_v_fv_se_e32m2:
4910 ; CHECK: # %bb.0: # %entry
4911 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
4912 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
4915 %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.se.nxv4f32.iXLen.f32.iXLen(iXLen 1, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
4916 ret <vscale x 4 x float> %0
4919 declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.se.nxv4f32.iXLen.f32.iXLen(iXLen, <vscale x 4 x float>, float, iXLen)
4921 define <vscale x 8 x float> @test_f_sf_vc_v_fv_se_e32m4(<vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
4922 ; CHECK-LABEL: test_f_sf_vc_v_fv_se_e32m4:
4923 ; CHECK: # %bb.0: # %entry
4924 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
4925 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
4928 %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fv.se.nxv8f32.iXLen.f32.iXLen(iXLen 1, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
4929 ret <vscale x 8 x float> %0
4932 declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fv.se.nxv8f32.iXLen.f32.iXLen(iXLen, <vscale x 8 x float>, float, iXLen)
4934 define <vscale x 16 x float> @test_f_sf_vc_v_fv_se_e32m8(<vscale x 16 x float> %vs2, float %fs1, iXLen %vl) {
4935 ; CHECK-LABEL: test_f_sf_vc_v_fv_se_e32m8:
4936 ; CHECK: # %bb.0: # %entry
4937 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
4938 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
4941 %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fv.se.nxv16f32.iXLen.f32.iXLen(iXLen 1, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl)
4942 ret <vscale x 16 x float> %0
4945 declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fv.se.nxv16f32.iXLen.f32.iXLen(iXLen, <vscale x 16 x float>, float, iXLen)
4947 define <vscale x 1 x double> @test_f_sf_vc_v_fv_se_e64m1(<vscale x 1 x double> %vs2, double %fs1, iXLen %vl) {
4948 ; CHECK-LABEL: test_f_sf_vc_v_fv_se_e64m1:
4949 ; CHECK: # %bb.0: # %entry
4950 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
4951 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
4954 %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fv.se.nxv1f64.iXLen.f64.iXLen(iXLen 1, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl)
4955 ret <vscale x 1 x double> %0
4958 declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.fv.se.nxv1f64.iXLen.f64.iXLen(iXLen, <vscale x 1 x double>, double, iXLen)
4960 define <vscale x 2 x double> @test_f_sf_vc_v_fv_se_e64m2(<vscale x 2 x double> %vs2, double %fs1, iXLen %vl) {
4961 ; CHECK-LABEL: test_f_sf_vc_v_fv_se_e64m2:
4962 ; CHECK: # %bb.0: # %entry
4963 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
4964 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
4967 %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fv.se.nxv2f64.iXLen.f64.iXLen(iXLen 1, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl)
4968 ret <vscale x 2 x double> %0
4971 declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.fv.se.nxv2f64.iXLen.f64.iXLen(iXLen, <vscale x 2 x double>, double, iXLen)
4973 define <vscale x 4 x double> @test_f_sf_vc_v_fv_se_e64m4(<vscale x 4 x double> %vs2, double %fs1, iXLen %vl) {
4974 ; CHECK-LABEL: test_f_sf_vc_v_fv_se_e64m4:
4975 ; CHECK: # %bb.0: # %entry
4976 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
4977 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
4980 %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fv.se.nxv4f64.iXLen.f64.iXLen(iXLen 1, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl)
4981 ret <vscale x 4 x double> %0
4984 declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.fv.se.nxv4f64.iXLen.f64.iXLen(iXLen, <vscale x 4 x double>, double, iXLen)
4986 define <vscale x 8 x double> @test_f_sf_vc_v_fv_se_e64m8(<vscale x 8 x double> %vs2, double %fs1, iXLen %vl) {
4987 ; CHECK-LABEL: test_f_sf_vc_v_fv_se_e64m8:
4988 ; CHECK: # %bb.0: # %entry
4989 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
4990 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
4993 %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fv.se.nxv8f64.iXLen.f64.iXLen(iXLen 1, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl)
4994 ret <vscale x 8 x double> %0
4997 declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.fv.se.nxv8f64.iXLen.f64.iXLen(iXLen, <vscale x 8 x double>, double, iXLen)
4999 define <vscale x 1 x half> @test_f_sf_vc_v_fv_e16mf4(<vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
5000 ; CHECK-LABEL: test_f_sf_vc_v_fv_e16mf4:
5001 ; CHECK: # %bb.0: # %entry
5002 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
5003 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
5006 %0 = tail call <vscale x 1 x half> @llvm.riscv.sf.vc.v.fv.nxv1f16.iXLen.f16.iXLen(iXLen 1, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
5007 ret <vscale x 1 x half> %0
5010 declare <vscale x 1 x half> @llvm.riscv.sf.vc.v.fv.nxv1f16.iXLen.f16.iXLen(iXLen, <vscale x 1 x half>, half, iXLen)
5012 define <vscale x 2 x half> @test_f_sf_vc_v_fv_e16mf2(<vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
5013 ; CHECK-LABEL: test_f_sf_vc_v_fv_e16mf2:
5014 ; CHECK: # %bb.0: # %entry
5015 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
5016 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
5019 %0 = tail call <vscale x 2 x half> @llvm.riscv.sf.vc.v.fv.nxv2f16.iXLen.f16.iXLen(iXLen 1, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
5020 ret <vscale x 2 x half> %0
5023 declare <vscale x 2 x half> @llvm.riscv.sf.vc.v.fv.nxv2f16.iXLen.f16.iXLen(iXLen, <vscale x 2 x half>, half, iXLen)
5025 define <vscale x 4 x half> @test_f_sf_vc_v_fv_e16m1(<vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
5026 ; CHECK-LABEL: test_f_sf_vc_v_fv_e16m1:
5027 ; CHECK: # %bb.0: # %entry
5028 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
5029 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
5032 %0 = tail call <vscale x 4 x half> @llvm.riscv.sf.vc.v.fv.nxv4f16.iXLen.f16.iXLen(iXLen 1, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
5033 ret <vscale x 4 x half> %0
5036 declare <vscale x 4 x half> @llvm.riscv.sf.vc.v.fv.nxv4f16.iXLen.f16.iXLen(iXLen, <vscale x 4 x half>, half, iXLen)
5038 define <vscale x 8 x half> @test_f_sf_vc_v_fv_e16m2(<vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
5039 ; CHECK-LABEL: test_f_sf_vc_v_fv_e16m2:
5040 ; CHECK: # %bb.0: # %entry
5041 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
5042 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
5045 %0 = tail call <vscale x 8 x half> @llvm.riscv.sf.vc.v.fv.nxv8f16.iXLen.f16.iXLen(iXLen 1, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
5046 ret <vscale x 8 x half> %0
5049 declare <vscale x 8 x half> @llvm.riscv.sf.vc.v.fv.nxv8f16.iXLen.f16.iXLen(iXLen, <vscale x 8 x half>, half, iXLen)
5051 define <vscale x 16 x half> @test_f_sf_vc_v_fv_e16m4(<vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
5052 ; CHECK-LABEL: test_f_sf_vc_v_fv_e16m4:
5053 ; CHECK: # %bb.0: # %entry
5054 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
5055 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
5058 %0 = tail call <vscale x 16 x half> @llvm.riscv.sf.vc.v.fv.nxv16f16.iXLen.f16.iXLen(iXLen 1, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
5059 ret <vscale x 16 x half> %0
5062 declare <vscale x 16 x half> @llvm.riscv.sf.vc.v.fv.nxv16f16.iXLen.f16.iXLen(iXLen, <vscale x 16 x half>, half, iXLen)
5064 define <vscale x 32 x half> @test_f_sf_vc_v_fv_e16m8(<vscale x 32 x half> %vs2, half %fs1, iXLen %vl) {
5065 ; CHECK-LABEL: test_f_sf_vc_v_fv_e16m8:
5066 ; CHECK: # %bb.0: # %entry
5067 ; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
5068 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
5071 %0 = tail call <vscale x 32 x half> @llvm.riscv.sf.vc.v.fv.nxv32f16.iXLen.f16.iXLen(iXLen 1, <vscale x 32 x half> %vs2, half %fs1, iXLen %vl)
5072 ret <vscale x 32 x half> %0
5075 declare <vscale x 32 x half> @llvm.riscv.sf.vc.v.fv.nxv32f16.iXLen.f16.iXLen(iXLen, <vscale x 32 x half>, half, iXLen)
5077 define <vscale x 1 x float> @test_f_sf_vc_v_fv_e32mf2(<vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
5078 ; CHECK-LABEL: test_f_sf_vc_v_fv_e32mf2:
5079 ; CHECK: # %bb.0: # %entry
5080 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
5081 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
5084 %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fv.nxv1f32.iXLen.f32.iXLen(iXLen 1, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
5085 ret <vscale x 1 x float> %0
5088 declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fv.nxv1f32.iXLen.f32.iXLen(iXLen, <vscale x 1 x float>, float, iXLen)
5090 define <vscale x 2 x float> @test_f_sf_vc_v_fv_e32m1(<vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
5091 ; CHECK-LABEL: test_f_sf_vc_v_fv_e32m1:
5092 ; CHECK: # %bb.0: # %entry
5093 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
5094 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
5097 %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.nxv2f32.iXLen.f32.iXLen(iXLen 1, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
5098 ret <vscale x 2 x float> %0
5101 declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fv.nxv2f32.iXLen.f32.iXLen(iXLen, <vscale x 2 x float>, float, iXLen)
5103 define <vscale x 4 x float> @test_f_sf_vc_v_fv_e32m2(<vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
5104 ; CHECK-LABEL: test_f_sf_vc_v_fv_e32m2:
5105 ; CHECK: # %bb.0: # %entry
5106 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
5107 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
5110 %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.nxv4f32.iXLen.f32.iXLen(iXLen 1, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
5111 ret <vscale x 4 x float> %0
5114 declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fv.nxv4f32.iXLen.f32.iXLen(iXLen, <vscale x 4 x float>, float, iXLen)
5116 define <vscale x 8 x float> @test_f_sf_vc_v_fv_e32m4(<vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
5117 ; CHECK-LABEL: test_f_sf_vc_v_fv_e32m4:
5118 ; CHECK: # %bb.0: # %entry
5119 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
5120 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
5123 %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fv.nxv8f32.iXLen.f32.iXLen(iXLen 1, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
5124 ret <vscale x 8 x float> %0
5127 declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fv.nxv8f32.iXLen.f32.iXLen(iXLen, <vscale x 8 x float>, float, iXLen)
5129 define <vscale x 16 x float> @test_f_sf_vc_v_fv_e32m8(<vscale x 16 x float> %vs2, float %fs1, iXLen %vl) {
5130 ; CHECK-LABEL: test_f_sf_vc_v_fv_e32m8:
5131 ; CHECK: # %bb.0: # %entry
5132 ; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
5133 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
5136 %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fv.nxv16f32.iXLen.f32.iXLen(iXLen 1, <vscale x 16 x float> %vs2, float %fs1, iXLen %vl)
5137 ret <vscale x 16 x float> %0
5140 declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fv.nxv16f32.iXLen.f32.iXLen(iXLen, <vscale x 16 x float>, float, iXLen)
5142 define <vscale x 1 x double> @test_f_sf_vc_v_fv_e64m1(<vscale x 1 x double> %vs2, double %fs1, iXLen %vl) {
5143 ; CHECK-LABEL: test_f_sf_vc_v_fv_e64m1:
5144 ; CHECK: # %bb.0: # %entry
5145 ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
5146 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
5149 %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fv.nxv1f64.iXLen.f64.iXLen(iXLen 1, <vscale x 1 x double> %vs2, double %fs1, iXLen %vl)
5150 ret <vscale x 1 x double> %0
5153 declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.fv.nxv1f64.iXLen.f64.iXLen(iXLen, <vscale x 1 x double>, double, iXLen)
5155 define <vscale x 2 x double> @test_f_sf_vc_v_fv_e64m2(<vscale x 2 x double> %vs2, double %fs1, iXLen %vl) {
5156 ; CHECK-LABEL: test_f_sf_vc_v_fv_e64m2:
5157 ; CHECK: # %bb.0: # %entry
5158 ; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
5159 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
5162 %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fv.nxv2f64.iXLen.f64.iXLen(iXLen 1, <vscale x 2 x double> %vs2, double %fs1, iXLen %vl)
5163 ret <vscale x 2 x double> %0
5166 declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.fv.nxv2f64.iXLen.f64.iXLen(iXLen, <vscale x 2 x double>, double, iXLen)
5168 define <vscale x 4 x double> @test_f_sf_vc_v_fv_e64m4(<vscale x 4 x double> %vs2, double %fs1, iXLen %vl) {
5169 ; CHECK-LABEL: test_f_sf_vc_v_fv_e64m4:
5170 ; CHECK: # %bb.0: # %entry
5171 ; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
5172 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
5175 %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fv.nxv4f64.iXLen.f64.iXLen(iXLen 1, <vscale x 4 x double> %vs2, double %fs1, iXLen %vl)
5176 ret <vscale x 4 x double> %0
5179 declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.fv.nxv4f64.iXLen.f64.iXLen(iXLen, <vscale x 4 x double>, double, iXLen)
5181 define <vscale x 8 x double> @test_f_sf_vc_v_fv_e64m8(<vscale x 8 x double> %vs2, double %fs1, iXLen %vl) {
5182 ; CHECK-LABEL: test_f_sf_vc_v_fv_e64m8:
5183 ; CHECK: # %bb.0: # %entry
5184 ; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
5185 ; CHECK-NEXT: sf.vc.v.fv 1, v8, v8, fa0
5188 %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fv.nxv8f64.iXLen.f64.iXLen(iXLen 1, <vscale x 8 x double> %vs2, double %fs1, iXLen %vl)
5189 ret <vscale x 8 x double> %0
5192 declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.fv.nxv8f64.iXLen.f64.iXLen(iXLen, <vscale x 8 x double>, double, iXLen)