1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+xsfvcp \
3 ; RUN: -verify-machineinstrs | FileCheck %s
4 ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+xsfvcp \
5 ; RUN: -verify-machineinstrs | FileCheck %s
define void @test_sf_vc_vvw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i16.nxv1i8.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i16.nxv1i8.nxv1i8.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)

define void @test_sf_vc_vvw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e8mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i16.nxv2i8.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i16.nxv2i8.nxv2i8.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)

define void @test_sf_vc_vvw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e8mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i16.nxv4i8.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i16.nxv4i8.nxv4i8.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)

define void @test_sf_vc_vvw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e8m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i16.nxv8i8.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i16.nxv8i8.nxv8i8.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)

define void @test_sf_vc_vvw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e8m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i16.nxv16i8.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i16.nxv16i8.nxv16i8.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)

define void @test_sf_vc_vvw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e8m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v16, v20
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv32i16.nxv32i8.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv32i16.nxv32i8.nxv32i8.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)

define void @test_sf_vc_vvw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i32.nxv1i16.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i32.nxv1i16.nxv1i16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)

define void @test_sf_vc_vvw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e16mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i32.nxv2i16.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i32.nxv2i16.nxv2i16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)

define void @test_sf_vc_vvw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e16m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i32.nxv4i16.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i32.nxv4i16.nxv4i16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)

define void @test_sf_vc_vvw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e16m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i32.nxv8i16.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i32.nxv8i16.nxv8i16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)

define void @test_sf_vc_vvw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e16m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v16, v20
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i32.nxv16i16.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i32.nxv16i16.nxv16i16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)

define void @test_sf_vc_vvw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i64.nxv1i32.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i64.nxv1i32.nxv1i32.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)

define void @test_sf_vc_vvw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i64.nxv2i32.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i64.nxv2i32.nxv2i32.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)

define void @test_sf_vc_vvw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i64.nxv4i32.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i64.nxv4i32.nxv4i32.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)

define void @test_sf_vc_vvw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v16, v20
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i64.nxv8i32.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i64.nxv8i32.nxv8i32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
define <vscale x 1 x i16> @test_sf_vc_v_vvw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
  ret <vscale x 1 x i16> %0
}

declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)

define <vscale x 2 x i16> @test_sf_vc_v_vvw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
  ret <vscale x 2 x i16> %0
}

declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)

define <vscale x 4 x i16> @test_sf_vc_v_vvw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
  ret <vscale x 4 x i16> %0
}

declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)

define <vscale x 8 x i16> @test_sf_vc_v_vvw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
  ret <vscale x 8 x i16> %0
}

declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)

define <vscale x 16 x i16> @test_sf_vc_v_vvw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
  ret <vscale x 16 x i16> %0
}

declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)

define <vscale x 32 x i16> @test_sf_vc_v_vvw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
  ret <vscale x 32 x i16> %0
}

declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)

define <vscale x 1 x i32> @test_sf_vc_v_vvw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
  ret <vscale x 1 x i32> %0
}

declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)

define <vscale x 2 x i32> @test_sf_vc_v_vvw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e16mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
  ret <vscale x 2 x i32> %0
}

declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)

define <vscale x 4 x i32> @test_sf_vc_v_vvw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
  ret <vscale x 4 x i32> %0
}

declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)

define <vscale x 8 x i32> @test_sf_vc_v_vvw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
  ret <vscale x 8 x i32> %0
}

declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)

define <vscale x 16 x i32> @test_sf_vc_v_vvw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
  ret <vscale x 16 x i32> %0
}

declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)

define <vscale x 1 x i64> @test_sf_vc_v_vvw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
  ret <vscale x 1 x i64> %0
}

declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)

define <vscale x 2 x i64> @test_sf_vc_v_vvw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
  ret <vscale x 2 x i64> %0
}

declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)

define <vscale x 4 x i64> @test_sf_vc_v_vvw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
  ret <vscale x 4 x i64> %0
}

declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)

define <vscale x 8 x i64> @test_sf_vc_v_vvw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
  ret <vscale x 8 x i64> %0
}

declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
define <vscale x 1 x i16> @test_sf_vc_v_vvw_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
  ret <vscale x 1 x i16> %0
}

declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)

define <vscale x 2 x i16> @test_sf_vc_v_vvw_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e8mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
  ret <vscale x 2 x i16> %0
}

declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)

define <vscale x 4 x i16> @test_sf_vc_v_vvw_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e8mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
  ret <vscale x 4 x i16> %0
}

declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)

define <vscale x 8 x i16> @test_sf_vc_v_vvw_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e8m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
  ret <vscale x 8 x i16> %0
}

declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)

define <vscale x 16 x i16> @test_sf_vc_v_vvw_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e8m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
  ret <vscale x 16 x i16> %0
}

declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)

define <vscale x 32 x i16> @test_sf_vc_v_vvw_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e8m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
  ret <vscale x 32 x i16> %0
}

declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)

define <vscale x 1 x i32> @test_sf_vc_v_vvw_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
  ret <vscale x 1 x i32> %0
}

declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)

define <vscale x 2 x i32> @test_sf_vc_v_vvw_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e16mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
  ret <vscale x 2 x i32> %0
}

declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)

define <vscale x 4 x i32> @test_sf_vc_v_vvw_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e16m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
  ret <vscale x 4 x i32> %0
}

declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)

define <vscale x 8 x i32> @test_sf_vc_v_vvw_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e16m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
  ret <vscale x 8 x i32> %0
}

declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)

define <vscale x 16 x i32> @test_sf_vc_v_vvw_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e16m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
  ret <vscale x 16 x i32> %0
}

declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)

define <vscale x 1 x i64> @test_sf_vc_v_vvw_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
  ret <vscale x 1 x i64> %0
}

declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)

define <vscale x 2 x i64> @test_sf_vc_v_vvw_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
  ret <vscale x 2 x i64> %0
}

declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)

define <vscale x 4 x i64> @test_sf_vc_v_vvw_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
  ret <vscale x 4 x i64> %0
}

declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)

define <vscale x 8 x i64> @test_sf_vc_v_vvw_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
  ret <vscale x 8 x i64> %0
}

declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
define void @test_sf_vc_xvw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvw_se_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i16.nxv1i8.i8.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i16.nxv1i8.i8.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, i8, iXLen)

define void @test_sf_vc_xvw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvw_se_e8mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i16.nxv2i8.i8.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i16.nxv2i8.i8.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, i8, iXLen)

define void @test_sf_vc_xvw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvw_se_e8mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i16.nxv4i8.i8.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i16.nxv4i8.i8.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, i8, iXLen)

define void @test_sf_vc_xvw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvw_se_e8m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v10, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i16.nxv8i8.i8.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i16.nxv8i8.i8.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, i8, iXLen)

define void @test_sf_vc_xvw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvw_se_e8m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v12, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i16.nxv16i8.i8.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i16.nxv16i8.i8.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, i8, iXLen)
657 define void @test_sf_vc_xvw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
658 ; CHECK-LABEL: test_sf_vc_xvw_se_e8m4:
659 ; CHECK: # %bb.0: # %entry
660 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
661 ; CHECK-NEXT: sf.vc.xvw 3, v8, v16, a0
664 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv32i16.nxv32i8.i8.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
668 declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv32i16.nxv32i8.i8.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, i8, iXLen)
670 define void @test_sf_vc_xvw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
671 ; CHECK-LABEL: test_sf_vc_xvw_se_e16mf4:
672 ; CHECK: # %bb.0: # %entry
673 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
674 ; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
677 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i32.nxv1i16.i16.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
681 declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i32.nxv1i16.i16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, i16, iXLen)
683 define void @test_sf_vc_xvw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
684 ; CHECK-LABEL: test_sf_vc_xvw_se_e16mf2:
685 ; CHECK: # %bb.0: # %entry
686 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
687 ; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
690 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i32.nxv2i16.i16.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
694 declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i32.nxv2i16.i16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, i16, iXLen)
696 define void @test_sf_vc_xvw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
697 ; CHECK-LABEL: test_sf_vc_xvw_se_e16m1:
698 ; CHECK: # %bb.0: # %entry
699 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
700 ; CHECK-NEXT: sf.vc.xvw 3, v8, v10, a0
703 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i32.nxv4i16.i16.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
707 declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i32.nxv4i16.i16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, i16, iXLen)
709 define void @test_sf_vc_xvw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
710 ; CHECK-LABEL: test_sf_vc_xvw_se_e16m2:
711 ; CHECK: # %bb.0: # %entry
712 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
713 ; CHECK-NEXT: sf.vc.xvw 3, v8, v12, a0
716 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i32.nxv8i16.i16.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
720 declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i32.nxv8i16.i16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, i16, iXLen)
722 define void @test_sf_vc_xvw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
723 ; CHECK-LABEL: test_sf_vc_xvw_se_e16m4:
724 ; CHECK: # %bb.0: # %entry
725 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
726 ; CHECK-NEXT: sf.vc.xvw 3, v8, v16, a0
729 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i32.nxv16i16.i16.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
733 declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i32.nxv16i16.i16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, i16, iXLen)
735 define void @test_sf_vc_xvw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
736 ; CHECK-LABEL: test_sf_vc_xvw_se_e32mf2:
737 ; CHECK: # %bb.0: # %entry
738 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
739 ; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
742 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i64.nxv1i32.i32.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
746 declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i64.nxv1i32.i32.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, i32, iXLen)
748 define void @test_sf_vc_xvw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
749 ; CHECK-LABEL: test_sf_vc_xvw_se_e32m1:
750 ; CHECK: # %bb.0: # %entry
751 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
752 ; CHECK-NEXT: sf.vc.xvw 3, v8, v10, a0
755 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i64.nxv2i32.i32.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
759 declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i64.nxv2i32.i32.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, i32, iXLen)
761 define void @test_sf_vc_xvw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
762 ; CHECK-LABEL: test_sf_vc_xvw_se_e32m2:
763 ; CHECK: # %bb.0: # %entry
764 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
765 ; CHECK-NEXT: sf.vc.xvw 3, v8, v12, a0
768 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i64.nxv4i32.i32.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
772 declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i64.nxv4i32.i32.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, i32, iXLen)
774 define void @test_sf_vc_xvw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
775 ; CHECK-LABEL: test_sf_vc_xvw_se_e32m4:
776 ; CHECK: # %bb.0: # %entry
777 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
778 ; CHECK-NEXT: sf.vc.xvw 3, v8, v16, a0
781 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i64.nxv8i32.i32.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
785 declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i64.nxv8i32.i32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, i32, iXLen)
;; NOTE(review): sf.vc.v.xvw.se group — side-effecting widening vector-scalar
;; VCIX ops that also produce a value, hence "tu" (tail-undisturbed) in the
;; expected vsetvli. Autogenerated CHECK lines; do not hand-edit.
787 define <vscale x 1 x i16> @test_sf_vc_v_xvw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
788 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf8:
789 ; CHECK: # %bb.0: # %entry
790 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
791 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
794 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
795 ret <vscale x 1 x i16> %0
798 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, i8, iXLen)
800 define <vscale x 2 x i16> @test_sf_vc_v_xvw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
801 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf4:
802 ; CHECK: # %bb.0: # %entry
803 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
804 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
807 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
808 ret <vscale x 2 x i16> %0
811 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, i8, iXLen)
813 define <vscale x 4 x i16> @test_sf_vc_v_xvw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
814 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf2:
815 ; CHECK: # %bb.0: # %entry
816 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
817 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
820 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
821 ret <vscale x 4 x i16> %0
824 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, i8, iXLen)
826 define <vscale x 8 x i16> @test_sf_vc_v_xvw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
827 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m1:
828 ; CHECK: # %bb.0: # %entry
829 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma
830 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
833 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
834 ret <vscale x 8 x i16> %0
837 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, i8, iXLen)
839 define <vscale x 16 x i16> @test_sf_vc_v_xvw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
840 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m2:
841 ; CHECK: # %bb.0: # %entry
842 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma
843 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
846 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
847 ret <vscale x 16 x i16> %0
850 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, i8, iXLen)
852 define <vscale x 32 x i16> @test_sf_vc_v_xvw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
853 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m4:
854 ; CHECK: # %bb.0: # %entry
855 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma
856 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0
859 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
860 ret <vscale x 32 x i16> %0
863 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, i8, iXLen)
865 define <vscale x 1 x i32> @test_sf_vc_v_xvw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
866 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16mf4:
867 ; CHECK: # %bb.0: # %entry
868 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
869 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
872 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
873 ret <vscale x 1 x i32> %0
876 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, i16, iXLen)
878 define <vscale x 2 x i32> @test_sf_vc_v_xvw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
879 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16mf2:
880 ; CHECK: # %bb.0: # %entry
881 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma
882 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
885 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
886 ret <vscale x 2 x i32> %0
889 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, i16, iXLen)
891 define <vscale x 4 x i32> @test_sf_vc_v_xvw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
892 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m1:
893 ; CHECK: # %bb.0: # %entry
894 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
895 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
898 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
899 ret <vscale x 4 x i32> %0
902 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, i16, iXLen)
904 define <vscale x 8 x i32> @test_sf_vc_v_xvw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
905 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m2:
906 ; CHECK: # %bb.0: # %entry
907 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma
908 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
911 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
912 ret <vscale x 8 x i32> %0
915 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, i16, iXLen)
917 define <vscale x 16 x i32> @test_sf_vc_v_xvw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
918 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m4:
919 ; CHECK: # %bb.0: # %entry
920 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma
921 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0
924 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
925 ret <vscale x 16 x i32> %0
928 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, i16, iXLen)
;; NOTE(review): renamed the mangled suffix from .nxv1i64.i32.nxv1i32.iXLen.iXLen
;; to .nxv1i64.iXLen.nxv1i32.i32.iXLen so the type order (result, opcode imm,
;; vs2, rs1, vl) matches every sibling sf.vc.v.xvw[.se] test in this file and
;; the actual parameter types of the declaration below.
930 define <vscale x 1 x i64> @test_sf_vc_v_xvw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
931 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32mf2:
932 ; CHECK: # %bb.0: # %entry
933 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
934 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
937 %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.iXLen.nxv1i32.i32.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
938 ret <vscale x 1 x i64> %0
941 declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.iXLen.nxv1i32.i32.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, i32, iXLen)
;; NOTE(review): renamed the mangled suffix from .nxv2i64.i32.nxv2i32.iXLen.iXLen
;; to .nxv2i64.iXLen.nxv2i32.i32.iXLen for consistency with the sibling
;; sf.vc.v.xvw[.se] tests (result, opcode imm, vs2, rs1, vl order).
943 define <vscale x 2 x i64> @test_sf_vc_v_xvw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
944 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m1:
945 ; CHECK: # %bb.0: # %entry
946 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
947 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
950 %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.iXLen.nxv2i32.i32.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
951 ret <vscale x 2 x i64> %0
954 declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.iXLen.nxv2i32.i32.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, i32, iXLen)
;; NOTE(review): renamed the mangled suffix from .nxv4i64.i32.nxv4i32.iXLen.iXLen
;; to .nxv4i64.iXLen.nxv4i32.i32.iXLen for consistency with the sibling
;; sf.vc.v.xvw[.se] tests (result, opcode imm, vs2, rs1, vl order).
956 define <vscale x 4 x i64> @test_sf_vc_v_xvw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
957 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m2:
958 ; CHECK: # %bb.0: # %entry
959 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
960 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
963 %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.iXLen.nxv4i32.i32.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
964 ret <vscale x 4 x i64> %0
967 declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.iXLen.nxv4i32.i32.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, i32, iXLen)
;; NOTE(review): renamed the mangled suffix from .nxv8i64.i32.nxv8i32.iXLen.iXLen
;; to .nxv8i64.iXLen.nxv8i32.i32.iXLen for consistency with the sibling
;; sf.vc.v.xvw[.se] tests (result, opcode imm, vs2, rs1, vl order).
969 define <vscale x 8 x i64> @test_sf_vc_v_xvw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
970 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m4:
971 ; CHECK: # %bb.0: # %entry
972 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma
973 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0
976 %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.iXLen.nxv8i32.i32.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
977 ret <vscale x 8 x i64> %0
980 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.iXLen.nxv8i32.i32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, i32, iXLen)
;; NOTE(review): sf.vc.v.xvw group (no ".se") — value-producing widening
;; vector-scalar VCIX ops without the side-effect marker. Codegen is expected
;; to be identical to the .se variants above. Autogenerated CHECK lines.
982 define <vscale x 1 x i16> @test_sf_vc_v_xvw_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
983 ; CHECK-LABEL: test_sf_vc_v_xvw_e8mf8:
984 ; CHECK: # %bb.0: # %entry
985 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
986 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
989 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
990 ret <vscale x 1 x i16> %0
993 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, i8, iXLen)
995 define <vscale x 2 x i16> @test_sf_vc_v_xvw_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
996 ; CHECK-LABEL: test_sf_vc_v_xvw_e8mf4:
997 ; CHECK: # %bb.0: # %entry
998 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
999 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
1002 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
1003 ret <vscale x 2 x i16> %0
1006 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, i8, iXLen)
1008 define <vscale x 4 x i16> @test_sf_vc_v_xvw_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1009 ; CHECK-LABEL: test_sf_vc_v_xvw_e8mf2:
1010 ; CHECK: # %bb.0: # %entry
1011 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
1012 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
1015 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
1016 ret <vscale x 4 x i16> %0
1019 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, i8, iXLen)
1021 define <vscale x 8 x i16> @test_sf_vc_v_xvw_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1022 ; CHECK-LABEL: test_sf_vc_v_xvw_e8m1:
1023 ; CHECK: # %bb.0: # %entry
1024 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma
1025 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
1028 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
1029 ret <vscale x 8 x i16> %0
1032 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, i8, iXLen)
1034 define <vscale x 16 x i16> @test_sf_vc_v_xvw_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1035 ; CHECK-LABEL: test_sf_vc_v_xvw_e8m2:
1036 ; CHECK: # %bb.0: # %entry
1037 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma
1038 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
1041 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
1042 ret <vscale x 16 x i16> %0
1045 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, i8, iXLen)
1047 define <vscale x 32 x i16> @test_sf_vc_v_xvw_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1048 ; CHECK-LABEL: test_sf_vc_v_xvw_e8m4:
1049 ; CHECK: # %bb.0: # %entry
1050 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma
1051 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0
1054 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
1055 ret <vscale x 32 x i16> %0
1058 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, i8, iXLen)
1060 define <vscale x 1 x i32> @test_sf_vc_v_xvw_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1061 ; CHECK-LABEL: test_sf_vc_v_xvw_e16mf4:
1062 ; CHECK: # %bb.0: # %entry
1063 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
1064 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
1067 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
1068 ret <vscale x 1 x i32> %0
1071 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, i16, iXLen)
1073 define <vscale x 2 x i32> @test_sf_vc_v_xvw_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1074 ; CHECK-LABEL: test_sf_vc_v_xvw_e16mf2:
1075 ; CHECK: # %bb.0: # %entry
1076 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma
1077 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
1080 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
1081 ret <vscale x 2 x i32> %0
1084 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, i16, iXLen)
1086 define <vscale x 4 x i32> @test_sf_vc_v_xvw_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1087 ; CHECK-LABEL: test_sf_vc_v_xvw_e16m1:
1088 ; CHECK: # %bb.0: # %entry
1089 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
1090 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
1093 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
1094 ret <vscale x 4 x i32> %0
1097 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, i16, iXLen)
1099 define <vscale x 8 x i32> @test_sf_vc_v_xvw_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1100 ; CHECK-LABEL: test_sf_vc_v_xvw_e16m2:
1101 ; CHECK: # %bb.0: # %entry
1102 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma
1103 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
1106 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
1107 ret <vscale x 8 x i32> %0
1110 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, i16, iXLen)
1112 define <vscale x 16 x i32> @test_sf_vc_v_xvw_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1113 ; CHECK-LABEL: test_sf_vc_v_xvw_e16m4:
1114 ; CHECK: # %bb.0: # %entry
1115 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma
1116 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0
1119 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
1120 ret <vscale x 16 x i32> %0
1123 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, i16, iXLen)
1125 define <vscale x 1 x i64> @test_sf_vc_v_xvw_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1126 ; CHECK-LABEL: test_sf_vc_v_xvw_e32mf2:
1127 ; CHECK: # %bb.0: # %entry
1128 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
1129 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
1132 %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.nxv1i64.iXLen.nxv1i32.i32.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
1133 ret <vscale x 1 x i64> %0
1136 declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.nxv1i64.iXLen.nxv1i32.i32.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, i32, iXLen)
1138 define <vscale x 2 x i64> @test_sf_vc_v_xvw_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1139 ; CHECK-LABEL: test_sf_vc_v_xvw_e32m1:
1140 ; CHECK: # %bb.0: # %entry
1141 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
1142 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
1145 %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.nxv2i64.iXLen.nxv2i32.i32.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
1146 ret <vscale x 2 x i64> %0
1149 declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.nxv2i64.iXLen.nxv2i32.i32.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, i32, iXLen)
1151 define <vscale x 4 x i64> @test_sf_vc_v_xvw_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1152 ; CHECK-LABEL: test_sf_vc_v_xvw_e32m2:
1153 ; CHECK: # %bb.0: # %entry
1154 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
1155 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
1158 %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.nxv4i64.iXLen.nxv4i32.i32.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
1159 ret <vscale x 4 x i64> %0
1162 declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.nxv4i64.iXLen.nxv4i32.i32.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, i32, iXLen)
1164 define <vscale x 8 x i64> @test_sf_vc_v_xvw_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1165 ; CHECK-LABEL: test_sf_vc_v_xvw_e32m4:
1166 ; CHECK: # %bb.0: # %entry
1167 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma
1168 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0
1171 %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.nxv8i64.iXLen.nxv8i32.i32.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
1172 ret <vscale x 8 x i64> %0
1175 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.nxv8i64.iXLen.nxv8i32.i32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, i32, iXLen)
;; NOTE(review): sf.vc.ivw.se group — side-effecting widening vector-immediate
;; VCIX ops returning void; the 5-bit immediate operand (10) is encoded
;; directly in the instruction, so vl lands in a0. Autogenerated CHECK lines.
1177 define void @test_sf_vc_ivw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
1178 ; CHECK-LABEL: test_sf_vc_ivw_se_e8mf8:
1179 ; CHECK: # %bb.0: # %entry
1180 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
1181 ; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
1184 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i16.nxv1i8.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
1188 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i16.nxv1i8.iXLen.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, iXLen, iXLen)
1190 define void @test_sf_vc_ivw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
1191 ; CHECK-LABEL: test_sf_vc_ivw_se_e8mf4:
1192 ; CHECK: # %bb.0: # %entry
1193 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
1194 ; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
1197 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i16.nxv2i8.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
1201 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i16.nxv2i8.iXLen.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, iXLen, iXLen)
1203 define void @test_sf_vc_ivw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
1204 ; CHECK-LABEL: test_sf_vc_ivw_se_e8mf2:
1205 ; CHECK: # %bb.0: # %entry
1206 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
1207 ; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
1210 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i16.nxv4i8.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
1214 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i16.nxv4i8.iXLen.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, iXLen, iXLen)
1216 define void @test_sf_vc_ivw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
1217 ; CHECK-LABEL: test_sf_vc_ivw_se_e8m1:
1218 ; CHECK: # %bb.0: # %entry
1219 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
1220 ; CHECK-NEXT: sf.vc.ivw 3, v8, v10, 10
1223 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i16.nxv8i8.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
1227 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i16.nxv8i8.iXLen.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, iXLen, iXLen)
1229 define void @test_sf_vc_ivw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
1230 ; CHECK-LABEL: test_sf_vc_ivw_se_e8m2:
1231 ; CHECK: # %bb.0: # %entry
1232 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
1233 ; CHECK-NEXT: sf.vc.ivw 3, v8, v12, 10
1236 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i16.nxv16i8.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
1240 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i16.nxv16i8.iXLen.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, iXLen, iXLen)
1242 define void @test_sf_vc_ivw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
1243 ; CHECK-LABEL: test_sf_vc_ivw_se_e8m4:
1244 ; CHECK: # %bb.0: # %entry
1245 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
1246 ; CHECK-NEXT: sf.vc.ivw 3, v8, v16, 10
1249 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv32i16.nxv32i8.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
1253 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv32i16.nxv32i8.iXLen.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, iXLen, iXLen)
1255 define void @test_sf_vc_ivw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
1256 ; CHECK-LABEL: test_sf_vc_ivw_se_e16mf4:
1257 ; CHECK: # %bb.0: # %entry
1258 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
1259 ; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
1262 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i32.nxv1i16.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
1266 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i32.nxv1i16.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, iXLen, iXLen)
1268 define void @test_sf_vc_ivw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
1269 ; CHECK-LABEL: test_sf_vc_ivw_se_e16mf2:
1270 ; CHECK: # %bb.0: # %entry
1271 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
1272 ; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
1275 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i32.nxv2i16.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
1279 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i32.nxv2i16.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, iXLen, iXLen)
1281 define void @test_sf_vc_ivw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
1282 ; CHECK-LABEL: test_sf_vc_ivw_se_e16m1:
1283 ; CHECK: # %bb.0: # %entry
1284 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
1285 ; CHECK-NEXT: sf.vc.ivw 3, v8, v10, 10
1288 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i32.nxv4i16.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
1292 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i32.nxv4i16.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, iXLen, iXLen)
1294 define void @test_sf_vc_ivw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
1295 ; CHECK-LABEL: test_sf_vc_ivw_se_e16m2:
1296 ; CHECK: # %bb.0: # %entry
1297 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
1298 ; CHECK-NEXT: sf.vc.ivw 3, v8, v12, 10
1301 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i32.nxv8i16.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
1305 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i32.nxv8i16.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, iXLen, iXLen)
1307 define void @test_sf_vc_ivw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
1308 ; CHECK-LABEL: test_sf_vc_ivw_se_e16m4:
1309 ; CHECK: # %bb.0: # %entry
1310 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
1311 ; CHECK-NEXT: sf.vc.ivw 3, v8, v16, 10
1314 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i32.nxv16i16.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
1318 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i32.nxv16i16.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, iXLen, iXLen)
1320 define void @test_sf_vc_ivw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
1321 ; CHECK-LABEL: test_sf_vc_ivw_se_e32mf2:
1322 ; CHECK: # %bb.0: # %entry
1323 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
1324 ; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
1327 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i64.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
1331 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i64.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, iXLen, iXLen)
1333 define void @test_sf_vc_ivw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
1334 ; CHECK-LABEL: test_sf_vc_ivw_se_e32m1:
1335 ; CHECK: # %bb.0: # %entry
1336 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
1337 ; CHECK-NEXT: sf.vc.ivw 3, v8, v10, 10
1340 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i64.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
1344 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i64.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, iXLen, iXLen)
1346 define void @test_sf_vc_ivw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
1347 ; CHECK-LABEL: test_sf_vc_ivw_se_e32m2:
1348 ; CHECK: # %bb.0: # %entry
1349 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
1350 ; CHECK-NEXT: sf.vc.ivw 3, v8, v12, 10
1353 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i64.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
1357 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i64.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, iXLen, iXLen)
1359 define void @test_sf_vc_ivw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
1360 ; CHECK-LABEL: test_sf_vc_ivw_se_e32m4:
1361 ; CHECK: # %bb.0: # %entry
1362 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
1363 ; CHECK-NEXT: sf.vc.ivw 3, v8, v16, 10
1366 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i64.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
1370 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i64.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, iXLen, iXLen)
1372 define <vscale x 1 x i16> @test_sf_vc_v_ivw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
1373 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf8:
1374 ; CHECK: # %bb.0: # %entry
1375 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
1376 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1379 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
1380 ret <vscale x 1 x i16> %0
1383 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, iXLen, iXLen)
1385 define <vscale x 2 x i16> @test_sf_vc_v_ivw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
1386 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf4:
1387 ; CHECK: # %bb.0: # %entry
1388 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
1389 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1392 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
1393 ret <vscale x 2 x i16> %0
1396 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, iXLen, iXLen)
1398 define <vscale x 4 x i16> @test_sf_vc_v_ivw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
1399 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf2:
1400 ; CHECK: # %bb.0: # %entry
1401 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
1402 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1405 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
1406 ret <vscale x 4 x i16> %0
1409 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, iXLen, iXLen)
1411 define <vscale x 8 x i16> @test_sf_vc_v_ivw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
1412 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m1:
1413 ; CHECK: # %bb.0: # %entry
1414 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
1415 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
1418 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
1419 ret <vscale x 8 x i16> %0
1422 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, iXLen, iXLen)
1424 define <vscale x 16 x i16> @test_sf_vc_v_ivw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
1425 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m2:
1426 ; CHECK: # %bb.0: # %entry
1427 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
1428 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
1431 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
1432 ret <vscale x 16 x i16> %0
1435 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, iXLen, iXLen)
1437 define <vscale x 32 x i16> @test_sf_vc_v_ivw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
1438 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m4:
1439 ; CHECK: # %bb.0: # %entry
1440 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
1441 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10
1444 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
1445 ret <vscale x 32 x i16> %0
1448 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, iXLen, iXLen)
1450 define <vscale x 1 x i32> @test_sf_vc_v_ivw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
1451 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16mf4:
1452 ; CHECK: # %bb.0: # %entry
1453 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
1454 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1457 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
1458 ret <vscale x 1 x i32> %0
1461 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, iXLen, iXLen)
1463 define <vscale x 2 x i32> @test_sf_vc_v_ivw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
1464 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16mf2:
1465 ; CHECK: # %bb.0: # %entry
1466 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
1467 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1470 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
1471 ret <vscale x 2 x i32> %0
1474 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, iXLen, iXLen)
1476 define <vscale x 4 x i32> @test_sf_vc_v_ivw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
1477 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m1:
1478 ; CHECK: # %bb.0: # %entry
1479 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
1480 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
1483 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
1484 ret <vscale x 4 x i32> %0
1487 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, iXLen, iXLen)
1489 define <vscale x 8 x i32> @test_sf_vc_v_ivw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
1490 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m2:
1491 ; CHECK: # %bb.0: # %entry
1492 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
1493 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
1496 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
1497 ret <vscale x 8 x i32> %0
1500 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, iXLen, iXLen)
1502 define <vscale x 16 x i32> @test_sf_vc_v_ivw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
1503 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m4:
1504 ; CHECK: # %bb.0: # %entry
1505 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
1506 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10
1509 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
1510 ret <vscale x 16 x i32> %0
1513 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, iXLen, iXLen)
1515 define <vscale x 1 x i64> @test_sf_vc_v_ivw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
1516 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32mf2:
1517 ; CHECK: # %bb.0: # %entry
1518 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
1519 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1522 %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
1523 ret <vscale x 1 x i64> %0
1526 declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, iXLen, iXLen)
1528 define <vscale x 2 x i64> @test_sf_vc_v_ivw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
1529 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m1:
1530 ; CHECK: # %bb.0: # %entry
1531 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
1532 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
1535 %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
1536 ret <vscale x 2 x i64> %0
1539 declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, iXLen, iXLen)
1541 define <vscale x 4 x i64> @test_sf_vc_v_ivw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
1542 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m2:
1543 ; CHECK: # %bb.0: # %entry
1544 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
1545 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
1548 %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
1549 ret <vscale x 4 x i64> %0
1552 declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, iXLen, iXLen)
1554 define <vscale x 8 x i64> @test_sf_vc_v_ivw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
1555 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m4:
1556 ; CHECK: # %bb.0: # %entry
1557 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
1558 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10
1561 %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
1562 ret <vscale x 8 x i64> %0
1565 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, iXLen, iXLen)
1567 define <vscale x 1 x i16> @test_sf_vc_v_ivw_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
1568 ; CHECK-LABEL: test_sf_vc_v_ivw_e8mf8:
1569 ; CHECK: # %bb.0: # %entry
1570 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
1571 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1574 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
1575 ret <vscale x 1 x i16> %0
1578 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, iXLen, iXLen)
1580 define <vscale x 2 x i16> @test_sf_vc_v_ivw_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
1581 ; CHECK-LABEL: test_sf_vc_v_ivw_e8mf4:
1582 ; CHECK: # %bb.0: # %entry
1583 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
1584 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1587 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
1588 ret <vscale x 2 x i16> %0
1591 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, iXLen, iXLen)
1593 define <vscale x 4 x i16> @test_sf_vc_v_ivw_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
1594 ; CHECK-LABEL: test_sf_vc_v_ivw_e8mf2:
1595 ; CHECK: # %bb.0: # %entry
1596 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
1597 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1600 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
1601 ret <vscale x 4 x i16> %0
1604 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, iXLen, iXLen)
1606 define <vscale x 8 x i16> @test_sf_vc_v_ivw_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
1607 ; CHECK-LABEL: test_sf_vc_v_ivw_e8m1:
1608 ; CHECK: # %bb.0: # %entry
1609 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
1610 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
1613 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
1614 ret <vscale x 8 x i16> %0
1617 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, iXLen, iXLen)
1619 define <vscale x 16 x i16> @test_sf_vc_v_ivw_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
1620 ; CHECK-LABEL: test_sf_vc_v_ivw_e8m2:
1621 ; CHECK: # %bb.0: # %entry
1622 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
1623 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
1626 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
1627 ret <vscale x 16 x i16> %0
1630 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, iXLen, iXLen)
1632 define <vscale x 32 x i16> @test_sf_vc_v_ivw_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
1633 ; CHECK-LABEL: test_sf_vc_v_ivw_e8m4:
1634 ; CHECK: # %bb.0: # %entry
1635 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
1636 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10
1639 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
1640 ret <vscale x 32 x i16> %0
1643 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, iXLen, iXLen)
1645 define <vscale x 1 x i32> @test_sf_vc_v_ivw_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
1646 ; CHECK-LABEL: test_sf_vc_v_ivw_e16mf4:
1647 ; CHECK: # %bb.0: # %entry
1648 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
1649 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1652 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
1653 ret <vscale x 1 x i32> %0
1656 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, iXLen, iXLen)
1658 define <vscale x 2 x i32> @test_sf_vc_v_ivw_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
1659 ; CHECK-LABEL: test_sf_vc_v_ivw_e16mf2:
1660 ; CHECK: # %bb.0: # %entry
1661 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
1662 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1665 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
1666 ret <vscale x 2 x i32> %0
1669 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, iXLen, iXLen)
1671 define <vscale x 4 x i32> @test_sf_vc_v_ivw_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
1672 ; CHECK-LABEL: test_sf_vc_v_ivw_e16m1:
1673 ; CHECK: # %bb.0: # %entry
1674 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
1675 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
1678 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
1679 ret <vscale x 4 x i32> %0
1682 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, iXLen, iXLen)
1684 define <vscale x 8 x i32> @test_sf_vc_v_ivw_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
1685 ; CHECK-LABEL: test_sf_vc_v_ivw_e16m2:
1686 ; CHECK: # %bb.0: # %entry
1687 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
1688 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
1691 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
1692 ret <vscale x 8 x i32> %0
1695 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, iXLen, iXLen)
1697 define <vscale x 16 x i32> @test_sf_vc_v_ivw_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
1698 ; CHECK-LABEL: test_sf_vc_v_ivw_e16m4:
1699 ; CHECK: # %bb.0: # %entry
1700 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
1701 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10
1704 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
1705 ret <vscale x 16 x i32> %0
1708 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, iXLen, iXLen)
1710 define <vscale x 1 x i64> @test_sf_vc_v_ivw_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
1711 ; CHECK-LABEL: test_sf_vc_v_ivw_e32mf2:
1712 ; CHECK: # %bb.0: # %entry
1713 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
1714 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1717 %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
1718 ret <vscale x 1 x i64> %0
1721 declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, iXLen, iXLen)
1723 define <vscale x 2 x i64> @test_sf_vc_v_ivw_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
1724 ; CHECK-LABEL: test_sf_vc_v_ivw_e32m1:
1725 ; CHECK: # %bb.0: # %entry
1726 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
1727 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
1730 %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
1731 ret <vscale x 2 x i64> %0
1734 declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, iXLen, iXLen)
1736 define <vscale x 4 x i64> @test_sf_vc_v_ivw_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
1737 ; CHECK-LABEL: test_sf_vc_v_ivw_e32m2:
1738 ; CHECK: # %bb.0: # %entry
1739 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
1740 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
1743 %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
1744 ret <vscale x 4 x i64> %0
1747 declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, iXLen, iXLen)
1749 define <vscale x 8 x i64> @test_sf_vc_v_ivw_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
1750 ; CHECK-LABEL: test_sf_vc_v_ivw_e32m4:
1751 ; CHECK: # %bb.0: # %entry
1752 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
1753 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10
1756 %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
1757 ret <vscale x 8 x i64> %0
1760 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, iXLen, iXLen)
1762 define void @test_sf_vc_fwvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
1763 ; CHECK-LABEL: test_sf_vc_fwvv_se_e32mf2:
1764 ; CHECK: # %bb.0: # %entry
1765 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
1766 ; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
1769 tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
1773 declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
1775 define <vscale x 1 x float> @test_sf_vc_fw_fwvvv_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
1776 ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32mf2:
1777 ; CHECK: # %bb.0: # %entry
1778 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
1779 ; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
1782 %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
1783 ret <vscale x 1 x float> %0
1786 declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv1f32.nxv1i16.nxv1i16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)
1788 define void @test_sf_vc_fwvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
1789 ; CHECK-LABEL: test_sf_vc_fwvv_se_e32m1:
1790 ; CHECK: # %bb.0: # %entry
1791 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
1792 ; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
1795 tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
1799 declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
1801 define <vscale x 2 x float> @test_sf_vc_fw_fwvvv_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
1802 ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m1:
1803 ; CHECK: # %bb.0: # %entry
1804 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
1805 ; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
1808 %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
1809 ret <vscale x 2 x float> %0
1812 declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv2f32.nxv2i16.nxv2i16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)
1814 define void @test_sf_vc_fwvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
1815 ; CHECK-LABEL: test_sf_vc_fwvv_se_e32m2:
1816 ; CHECK: # %bb.0: # %entry
1817 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
1818 ; CHECK-NEXT: sf.vc.vvw 3, v8, v10, v11
1821 tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
1825 declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
1827 define <vscale x 4 x float> @test_sf_vc_fw_fwvvv_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
1828 ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m2:
1829 ; CHECK: # %bb.0: # %entry
1830 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
1831 ; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11
1834 %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
1835 ret <vscale x 4 x float> %0
1838 declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv4f32.nxv4i16.nxv4i16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)
1840 define void @test_sf_vc_fwvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
1841 ; CHECK-LABEL: test_sf_vc_fwvv_se_e32m4:
1842 ; CHECK: # %bb.0: # %entry
1843 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
1844 ; CHECK-NEXT: sf.vc.vvw 3, v8, v12, v14
1847 tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
1851 declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
1853 define <vscale x 8 x float> @test_sf_vc_fw_fwvvv_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
1854 ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m4:
1855 ; CHECK: # %bb.0: # %entry
1856 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
1857 ; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14
1860 %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
1861 ret <vscale x 8 x float> %0
1864 declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv8f32.nxv8i16.nxv8i16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)
1866 define void @test_sf_vc_fwvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
1867 ; CHECK-LABEL: test_sf_vc_fwvv_se_e32m8:
1868 ; CHECK: # %bb.0: # %entry
1869 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
1870 ; CHECK-NEXT: sf.vc.vvw 3, v8, v16, v20
1873 tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
1877 declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)
1879 define <vscale x 16 x float> @test_sf_vc_fw_fwvvv_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
1880 ; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e32m8:
1881 ; CHECK: # %bb.0: # %entry
1882 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
1883 ; CHECK-NEXT: sf.vc.v.vvw 3, v8, v16, v20
1886 %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
1887 ret <vscale x 16 x float> %0
declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv16f32.nxv16i16.nxv16i16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)

; sf.vc.vvw / sf.vc.v.vvw with e64 (f64) destination elements: i32 sources are
; configured at e32 with half the destination LMUL (mf2..m4).
define void @test_sf_vc_fwvv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvv_se_e64m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)

define <vscale x 1 x double> @test_sf_vc_fw_fwvvv_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
  ret <vscale x 1 x double> %0
}

declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv1f64.nxv1i32.nxv1i32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)

define void @test_sf_vc_fwvv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvv_se_e64m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)

define <vscale x 2 x double> @test_sf_vc_fw_fwvvv_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
  ret <vscale x 2 x double> %0
}

declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv2f64.nxv2i32.nxv2i32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)

define void @test_sf_vc_fwvv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvv_se_e64m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)

define <vscale x 4 x double> @test_sf_vc_fw_fwvvv_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
  ret <vscale x 4 x double> %0
}

declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.nxv4i32.nxv4i32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)

define void @test_sf_vc_fwvv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvv_se_e64m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v16, v20
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)

define <vscale x 8 x double> @test_sf_vc_fw_fwvvv_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvvv_se_e64m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
  ret <vscale x 8 x double> %0
}

declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv8f64.nxv8i32.nxv8i32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
; sf.vc.xvw / sf.vc.v.xvw (widening, vector-scalar): %rs1 arrives in a0 and %vl
; in a1; vsetvli is configured at the narrow source SEW with half the
; destination LMUL.
define void @test_sf_vc_fwvx_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvx_se_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f32.nxv1i16.i16.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f32.nxv1i16.i16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i16>, i16, iXLen)

define <vscale x 1 x float> @test_sf_vc_w_fwvx_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv1f32.nxv1f16.nxv1i16.i16.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
  ret <vscale x 1 x float> %0
}

declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv1f32.nxv1f16.nxv1i16.i16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i16>, i16, iXLen)

define void @test_sf_vc_fwvx_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvx_se_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f32.nxv2i16.i16.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f32.nxv2i16.i16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i16>, i16, iXLen)

define <vscale x 2 x float> @test_sf_vc_w_fwvx_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv2f32.nxv2f16.nxv2i16.i16.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
  ret <vscale x 2 x float> %0
}

declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv2f32.nxv2f16.nxv2i16.i16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i16>, i16, iXLen)

define void @test_sf_vc_fwvx_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvx_se_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v10, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f32.nxv4i16.i16.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f32.nxv4i16.i16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i16>, i16, iXLen)

define <vscale x 4 x float> @test_sf_vc_w_fwvx_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv4f32.nxv4f16.nxv4i16.i16.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
  ret <vscale x 4 x float> %0
}

declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv4f32.nxv4f16.nxv4i16.i16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i16>, i16, iXLen)

define void @test_sf_vc_fwvx_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvx_se_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v12, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f32.nxv8i16.i16.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f32.nxv8i16.i16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i16>, i16, iXLen)

define <vscale x 8 x float> @test_sf_vc_w_fwvx_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv8f32.nxv8f16.nxv8i16.i16.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
  ret <vscale x 8 x float> %0
}

declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv8f32.nxv8f16.nxv8i16.i16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i16>, i16, iXLen)

define void @test_sf_vc_fwvx_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvx_se_e32m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v16, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16f32.nxv16i16.i16.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16f32.nxv16i16.i16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i16>, i16, iXLen)

define <vscale x 16 x float> @test_sf_vc_w_fwvx_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_w_fwvx_se_e32m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv16f32.nxv16f16.nxv16i16.i16.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
  ret <vscale x 16 x float> %0
}

declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv16f32.nxv16f16.nxv16i16.i16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i16>, i16, iXLen)

define void @test_sf_vc_fwvx_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvx_se_e64m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f64.nxv1i32.i32.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f64.nxv1i32.i32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x i32>, i32, iXLen)

define <vscale x 1 x double> @test_sf_vc_w_fwvx_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv1f64.nxv1f32.nxv1i32.i32.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
  ret <vscale x 1 x double> %0
}

declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv1f64.nxv1f32.nxv1i32.i32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x i32>, i32, iXLen)

define void @test_sf_vc_fwvx_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvx_se_e64m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v10, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f64.nxv2i32.i32.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f64.nxv2i32.i32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x i32>, i32, iXLen)

define <vscale x 2 x double> @test_sf_vc_w_fwvx_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv2f64.nxv2f32.nxv2i32.i32.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
  ret <vscale x 2 x double> %0
}

declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv2f64.nxv2f32.nxv2i32.i32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x i32>, i32, iXLen)

define void @test_sf_vc_fwvx_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvx_se_e64m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v12, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f64.nxv4i32.i32.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f64.nxv4i32.i32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x i32>, i32, iXLen)

define <vscale x 4 x double> @test_sf_vc_w_fwvx_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.nxv4f32.nxv4i32.i32.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
  ret <vscale x 4 x double> %0
}

declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.nxv4f32.nxv4i32.i32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x i32>, i32, iXLen)

define void @test_sf_vc_fwvx_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvx_se_e64m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v16, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f64.nxv8i32.i32.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f64.nxv8i32.i32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x i32>, i32, iXLen)

define <vscale x 8 x double> @test_sf_vc_w_fwvx_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_w_fwvx_se_e64m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv8f64.nxv8f32.nxv8i32.i32.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
  ret <vscale x 8 x double> %0
}

declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv8f64.nxv8f32.nxv8i32.i32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x i32>, i32, iXLen)
; sf.vc.ivw / sf.vc.v.ivw (widening, vector-immediate): the 5-bit simm operand
; is the constant 3; %vl arrives in a0.
define void @test_sf_vc_fwvi_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvi_se_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    sf.vc.ivw 3, v8, v9, 3
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f32.nxv1i16.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x i16> %vs2, iXLen 3, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f32.nxv1i16.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i16>, iXLen, iXLen)

define <vscale x 1 x float> @test_sf_vc_fw_fwvi_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 3
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv1f32.nxv1f16.nxv1i16.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x i16> %vs2, iXLen 3, iXLen %vl)
  ret <vscale x 1 x float> %0
}

declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv1f32.nxv1f16.nxv1i16.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i16>, iXLen, iXLen)

define void @test_sf_vc_fwvi_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvi_se_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    sf.vc.ivw 3, v8, v9, 3
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f32.nxv2i16.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x i16> %vs2, iXLen 3, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f32.nxv2i16.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i16>, iXLen, iXLen)

define <vscale x 2 x float> @test_sf_vc_fw_fwvi_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 3
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv2f32.nxv2f16.nxv2i16.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x i16> %vs2, iXLen 3, iXLen %vl)
  ret <vscale x 2 x float> %0
}

declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv2f32.nxv2f16.nxv2i16.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i16>, iXLen, iXLen)

define void @test_sf_vc_fwvi_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvi_se_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    sf.vc.ivw 3, v8, v10, 3
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f32.nxv4i16.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x i16> %vs2, iXLen 3, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f32.nxv4i16.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i16>, iXLen, iXLen)

define <vscale x 4 x float> @test_sf_vc_fw_fwvi_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 3
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv4f32.nxv4f16.nxv4i16.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x i16> %vs2, iXLen 3, iXLen %vl)
  ret <vscale x 4 x float> %0
}

declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv4f32.nxv4f16.nxv4i16.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i16>, iXLen, iXLen)

define void @test_sf_vc_fwvi_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvi_se_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    sf.vc.ivw 3, v8, v12, 3
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f32.nxv8i16.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x i16> %vs2, iXLen 3, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f32.nxv8i16.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i16>, iXLen, iXLen)

define <vscale x 8 x float> @test_sf_vc_fw_fwvi_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 3
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv8f32.nxv8f16.nxv8i16.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x i16> %vs2, iXLen 3, iXLen %vl)
  ret <vscale x 8 x float> %0
}

declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv8f32.nxv8f16.nxv8i16.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i16>, iXLen, iXLen)

define void @test_sf_vc_fwvi_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvi_se_e32m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    sf.vc.ivw 3, v8, v16, 3
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16f32.nxv16i16.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x i16> %vs2, iXLen 3, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16f32.nxv16i16.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i16>, iXLen, iXLen)

define <vscale x 16 x float> @test_sf_vc_fw_fwvi_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e32m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v16, 3
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv16f32.nxv16f16.nxv16i16.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x i16> %vs2, iXLen 3, iXLen %vl)
  ret <vscale x 16 x float> %0
}

declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv16f32.nxv16f16.nxv16i16.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i16>, iXLen, iXLen)

define void @test_sf_vc_fwvi_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvi_se_e64m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    sf.vc.ivw 3, v8, v9, 3
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f64.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x i32> %vs2, iXLen 3, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f64.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x i32>, iXLen, iXLen)

define <vscale x 1 x double> @test_sf_vc_fw_fwvi_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 3
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv1f64.nxv1f32.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x i32> %vs2, iXLen 3, iXLen %vl)
  ret <vscale x 1 x double> %0
}

declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv1f64.nxv1f32.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x i32>, iXLen, iXLen)

define void @test_sf_vc_fwvi_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvi_se_e64m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    sf.vc.ivw 3, v8, v10, 3
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f64.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x i32> %vs2, iXLen 3, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f64.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x i32>, iXLen, iXLen)

define <vscale x 2 x double> @test_sf_vc_fw_fwvi_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 3
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv2f64.nxv2f32.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x i32> %vs2, iXLen 3, iXLen %vl)
  ret <vscale x 2 x double> %0
}

declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv2f64.nxv2f32.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x i32>, iXLen, iXLen)

define void @test_sf_vc_fwvi_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvi_se_e64m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    sf.vc.ivw 3, v8, v12, 3
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f64.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x i32> %vs2, iXLen 3, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f64.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x i32>, iXLen, iXLen)

define <vscale x 4 x double> @test_sf_vc_fw_fwvi_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 3
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv4f64.nxv4f32.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x i32> %vs2, iXLen 3, iXLen %vl)
  ret <vscale x 4 x double> %0
}

declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv4f64.nxv4f32.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x i32>, iXLen, iXLen)

define void @test_sf_vc_fwvi_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fwvi_se_e64m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    sf.vc.ivw 3, v8, v16, 3
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f64.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x i32> %vs2, iXLen 3, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f64.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x i32>, iXLen, iXLen)

define <vscale x 8 x double> @test_sf_vc_fw_fwvi_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_fw_fwvi_se_e64m8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v16, 3
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv8f64.nxv8f32.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x i32> %vs2, iXLen 3, iXLen %vl)
  ret <vscale x 8 x double> %0
}

declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv8f64.nxv8f32.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x i32>, iXLen, iXLen)
2464 define void @test_sf_vc_fwvf_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i16> %vs2, half %rs1, iXLen %vl) {
2465 ; CHECK-LABEL: test_sf_vc_fwvf_se_e32mf2:
2466 ; CHECK: # %bb.0: # %entry
2467 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
2468 ; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
2471 tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f32.nxv1i16.f16.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x i16> %vs2, half %rs1, iXLen %vl)
2475 declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f32.nxv1i16.f16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i16>, half, iXLen)
2477 define <vscale x 1 x float> @test_sf_vc_fw_fwvf_se_e32mf2(<vscale x 1 x float> %vd, <vscale x 1 x i16> %vs2, half %rs1, iXLen %vl) {
2478 ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32mf2:
2479 ; CHECK: # %bb.0: # %entry
2480 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
2481 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
2484 %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.nxv1f16.nxv1i16.f16.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x i16> %vs2, half %rs1, iXLen %vl)
2485 ret <vscale x 1 x float> %0
2488 declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.nxv1f16.nxv1i16.f16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x i16>, half, iXLen)
2490 define void @test_sf_vc_fwvf_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i16> %vs2, half %rs1, iXLen %vl) {
2491 ; CHECK-LABEL: test_sf_vc_fwvf_se_e32m1:
2492 ; CHECK: # %bb.0: # %entry
2493 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
2494 ; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
2497 tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f32.nxv2i16.f16.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x i16> %vs2, half %rs1, iXLen %vl)
2501 declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f32.nxv2i16.f16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i16>, half, iXLen)
2503 define <vscale x 2 x float> @test_sf_vc_fw_fwvf_se_e32m1(<vscale x 2 x float> %vd, <vscale x 2 x i16> %vs2, half %rs1, iXLen %vl) {
2504 ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m1:
2505 ; CHECK: # %bb.0: # %entry
2506 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
2507 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
2510 %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv2f32.nxv2f16.nxv2i16.f16.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x i16> %vs2, half %rs1, iXLen %vl)
2511 ret <vscale x 2 x float> %0
2514 declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv2f32.nxv2f16.nxv2i16.f16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x i16>, half, iXLen)
2516 define void @test_sf_vc_fwvf_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i16> %vs2, half %rs1, iXLen %vl) {
2517 ; CHECK-LABEL: test_sf_vc_fwvf_se_e32m2:
2518 ; CHECK: # %bb.0: # %entry
2519 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
2520 ; CHECK-NEXT: sf.vc.fvw 1, v8, v10, fa0
2523 tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f32.nxv4i16.f16.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x i16> %vs2, half %rs1, iXLen %vl)
2527 declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f32.nxv4i16.f16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i16>, half, iXLen)
2529 define <vscale x 4 x float> @test_sf_vc_fw_fwvf_se_e32m2(<vscale x 4 x float> %vd, <vscale x 4 x i16> %vs2, half %rs1, iXLen %vl) {
2530 ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m2:
2531 ; CHECK: # %bb.0: # %entry
2532 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
2533 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0
2536 %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv4f32.nxv4f16.nxv4i16.f16.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x i16> %vs2, half %rs1, iXLen %vl)
2537 ret <vscale x 4 x float> %0
2540 declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv4f32.nxv4f16.nxv4i16.f16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x i16>, half, iXLen)
2542 define void @test_sf_vc_fwvf_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i16> %vs2, half %rs1, iXLen %vl) {
2543 ; CHECK-LABEL: test_sf_vc_fwvf_se_e32m4:
2544 ; CHECK: # %bb.0: # %entry
2545 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
2546 ; CHECK-NEXT: sf.vc.fvw 1, v8, v12, fa0
2549 tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f32.nxv8i16.f16.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x i16> %vs2, half %rs1, iXLen %vl)
2553 declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f32.nxv8i16.f16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i16>, half, iXLen)
2555 define <vscale x 8 x float> @test_sf_vc_fw_fwvf_se_e32m4(<vscale x 8 x float> %vd, <vscale x 8 x i16> %vs2, half %rs1, iXLen %vl) {
2556 ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m4:
2557 ; CHECK: # %bb.0: # %entry
2558 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
2559 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0
2562 %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv8f32.nxv8f16.nxv8i16.f16.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x i16> %vs2, half %rs1, iXLen %vl)
2563 ret <vscale x 8 x float> %0
2566 declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv8f32.nxv8f16.nxv8i16.f16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x i16>, half, iXLen)
; Void (sf.vc.fvw) form at the largest f16->f32 widening, e32/m8: ta vsetvli
; at the narrow e16/m4 type; narrow source in v16.
2568 define void @test_sf_vc_fwvf_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i16> %vs2, half %rs1, iXLen %vl) {
2569 ; CHECK-LABEL: test_sf_vc_fwvf_se_e32m8:
2570 ; CHECK: # %bb.0: # %entry
2571 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
2572 ; CHECK-NEXT: sf.vc.fvw 1, v8, v16, fa0
2575 tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16f32.nxv16i16.f16.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x i16> %vs2, half %rs1, iXLen %vl)
2579 declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16f32.nxv16i16.f16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i16>, half, iXLen)
; Value-returning (sf.vc.v.fvw) form at e32/m8: tu vsetvli at the narrow
; e16/m4 type; narrow source in v16, result overwrites %vd in v8.
2581 define <vscale x 16 x float> @test_sf_vc_fw_fwvf_se_e32m8(<vscale x 16 x float> %vd, <vscale x 16 x i16> %vs2, half %rs1, iXLen %vl) {
2582 ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e32m8:
2583 ; CHECK: # %bb.0: # %entry
2584 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
2585 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v16, fa0
2588 %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv16f32.nxv16f16.nxv16i16.f16.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x i16> %vs2, half %rs1, iXLen %vl)
2589 ret <vscale x 16 x float> %0
2592 declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv16f32.nxv16f16.nxv16i16.f16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x i16>, half, iXLen)
; Void (sf.vc.fvw) form, f32 scalar widened to e64/m1: ta vsetvli at the
; narrow e32/mf2 type; the f32 scalar is passed in fa0.
2594 define void @test_sf_vc_fwvf_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x i32> %vs2, float %rs1, iXLen %vl) {
2595 ; CHECK-LABEL: test_sf_vc_fwvf_se_e64m1:
2596 ; CHECK: # %bb.0: # %entry
2597 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
2598 ; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
2601 tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f64.nxv1i32.f32.iXLen(iXLen 1, <vscale x 1 x double> %vd, <vscale x 1 x i32> %vs2, float %rs1, iXLen %vl)
2605 declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f64.nxv1i32.f32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x i32>, float, iXLen)
; Value-returning (sf.vc.v.fvw) form, f32 scalar widened to e64/m1: tu
; vsetvli at the narrow e32/mf2 type; result overwrites %vd in v8.
2607 define <vscale x 1 x double> @test_sf_vc_fw_fwvf_se_e64m1(<vscale x 1 x double> %vd, <vscale x 1 x i32> %vs2, float %rs1, iXLen %vl) {
2608 ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m1:
2609 ; CHECK: # %bb.0: # %entry
2610 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
2611 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
2614 %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv1f64.nxv1f32.nxv1i32.f32.iXLen(iXLen 1, <vscale x 1 x double> %vd, <vscale x 1 x i32> %vs2, float %rs1, iXLen %vl)
2615 ret <vscale x 1 x double> %0
2618 declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv1f64.nxv1f32.nxv1i32.f32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x i32>, float, iXLen)
; Void (sf.vc.fvw) form at e64/m2: ta vsetvli at the narrow e32/m1 type;
; narrow source in v10.
2620 define void @test_sf_vc_fwvf_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x i32> %vs2, float %rs1, iXLen %vl) {
2621 ; CHECK-LABEL: test_sf_vc_fwvf_se_e64m2:
2622 ; CHECK: # %bb.0: # %entry
2623 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
2624 ; CHECK-NEXT: sf.vc.fvw 1, v8, v10, fa0
2627 tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f64.nxv2i32.f32.iXLen(iXLen 1, <vscale x 2 x double> %vd, <vscale x 2 x i32> %vs2, float %rs1, iXLen %vl)
2631 declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f64.nxv2i32.f32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x i32>, float, iXLen)
; Value-returning (sf.vc.v.fvw) form at e64/m2: tu vsetvli at the narrow
; e32/m1 type; narrow source in v10, result overwrites %vd in v8.
2633 define <vscale x 2 x double> @test_sf_vc_fw_fwvf_se_e64m2(<vscale x 2 x double> %vd, <vscale x 2 x i32> %vs2, float %rs1, iXLen %vl) {
2634 ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m2:
2635 ; CHECK: # %bb.0: # %entry
2636 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
2637 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0
2640 %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv2f64.nxv2f32.nxv2i32.f32.iXLen(iXLen 1, <vscale x 2 x double> %vd, <vscale x 2 x i32> %vs2, float %rs1, iXLen %vl)
2641 ret <vscale x 2 x double> %0
2644 declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv2f64.nxv2f32.nxv2i32.f32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x i32>, float, iXLen)
; Void (sf.vc.fvw) form at e64/m4: ta vsetvli at the narrow e32/m2 type;
; narrow source in v12.
2646 define void @test_sf_vc_fwvf_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x i32> %vs2, float %rs1, iXLen %vl) {
2647 ; CHECK-LABEL: test_sf_vc_fwvf_se_e64m4:
2648 ; CHECK: # %bb.0: # %entry
2649 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
2650 ; CHECK-NEXT: sf.vc.fvw 1, v8, v12, fa0
2653 tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f64.nxv4i32.f32.iXLen(iXLen 1, <vscale x 4 x double> %vd, <vscale x 4 x i32> %vs2, float %rs1, iXLen %vl)
2657 declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f64.nxv4i32.f32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x i32>, float, iXLen)
; Value-returning (sf.vc.v.fvw) form at e64/m4: tu vsetvli at the narrow
; e32/m2 type; narrow source in v12, result overwrites %vd in v8.
2659 define <vscale x 4 x double> @test_sf_vc_fw_fwvf_se_e64m4(<vscale x 4 x double> %vd, <vscale x 4 x i32> %vs2, float %rs1, iXLen %vl) {
2660 ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m4:
2661 ; CHECK: # %bb.0: # %entry
2662 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
2663 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0
2666 %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.nxv4f32.nxv4i32.f32.iXLen(iXLen 1, <vscale x 4 x double> %vd, <vscale x 4 x i32> %vs2, float %rs1, iXLen %vl)
2667 ret <vscale x 4 x double> %0
2670 declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.nxv4f32.nxv4i32.f32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x i32>, float, iXLen)
; Void (sf.vc.fvw) form at the largest f32->f64 widening, e64/m8: ta vsetvli
; at the narrow e32/m4 type; narrow source in v16.
2672 define void @test_sf_vc_fwvf_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x i32> %vs2, float %rs1, iXLen %vl) {
2673 ; CHECK-LABEL: test_sf_vc_fwvf_se_e64m8:
2674 ; CHECK: # %bb.0: # %entry
2675 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
2676 ; CHECK-NEXT: sf.vc.fvw 1, v8, v16, fa0
2679 tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f64.nxv8i32.f32.iXLen(iXLen 1, <vscale x 8 x double> %vd, <vscale x 8 x i32> %vs2, float %rs1, iXLen %vl)
2683 declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f64.nxv8i32.f32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x i32>, float, iXLen)
; Value-returning (sf.vc.v.fvw) form at e64/m8: tu vsetvli at the narrow
; e32/m4 type; narrow source in v16, result overwrites %vd in v8.
2685 define <vscale x 8 x double> @test_sf_vc_fw_fwvf_se_e64m8(<vscale x 8 x double> %vd, <vscale x 8 x i32> %vs2, float %rs1, iXLen %vl) {
2686 ; CHECK-LABEL: test_sf_vc_fw_fwvf_se_e64m8:
2687 ; CHECK: # %bb.0: # %entry
2688 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
2689 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v16, fa0
2692 %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv8f64.nxv8f32.nxv8i32.f32.iXLen(iXLen 1, <vscale x 8 x double> %vd, <vscale x 8 x i32> %vs2, float %rs1, iXLen %vl)
2693 ret <vscale x 8 x double> %0
2696 declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv8f64.nxv8f32.nxv8i32.f32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x i32>, float, iXLen)