; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+xsfvcp \
; RUN:   -verify-machineinstrs | FileCheck %s
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+xsfvcp \
; RUN:   -verify-machineinstrs | FileCheck %s
; Void (".se") forms of sf.vc.vvw: each function selects one SEW/LMUL pair and
; must lower to a single vsetvli + sf.vc.vvw with tail-agnostic policy.
; Restored here: the autogenerated "ret" CHECK line, the "entry:" label, the
; "ret void" terminator, and the closing brace, all of which were lost in a
; line-dropping extraction of this test file.
define void @test_sf_vc_vvw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i16.nxv1i8.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i16.nxv1i8.nxv1i8.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)

define void @test_sf_vc_vvw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e8mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i16.nxv2i8.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i16.nxv2i8.nxv2i8.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)

define void @test_sf_vc_vvw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e8mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i16.nxv4i8.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i16.nxv4i8.nxv4i8.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)

define void @test_sf_vc_vvw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e8m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i16.nxv8i8.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i16.nxv8i8.nxv8i8.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)

define void @test_sf_vc_vvw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e8m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i16.nxv16i8.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i16.nxv16i8.nxv16i8.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)

define void @test_sf_vc_vvw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e8m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v16, v20
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv32i16.nxv32i8.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv32i16.nxv32i8.nxv32i8.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)

define void @test_sf_vc_vvw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i32.nxv1i16.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i32.nxv1i16.nxv1i16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)

define void @test_sf_vc_vvw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e16mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i32.nxv2i16.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i32.nxv2i16.nxv2i16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)

define void @test_sf_vc_vvw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e16m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i32.nxv4i16.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i32.nxv4i16.nxv4i16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)

define void @test_sf_vc_vvw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e16m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i32.nxv8i16.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i32.nxv8i16.nxv8i16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)

define void @test_sf_vc_vvw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e16m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v16, v20
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i32.nxv16i16.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16i32.nxv16i16.nxv16i16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)

define void @test_sf_vc_vvw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i64.nxv1i32.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1i64.nxv1i32.nxv1i32.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)

define void @test_sf_vc_vvw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i64.nxv2i32.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2i64.nxv2i32.nxv2i32.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)

define void @test_sf_vc_vvw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i64.nxv4i32.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4i64.nxv4i32.nxv4i32.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)

define void @test_sf_vc_vvw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_vvw_se_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    sf.vc.vvw 3, v8, v16, v20
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i64.nxv8i32.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8i64.nxv8i32.nxv8i32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
; Value-returning ".se" forms of sf.vc.v.vvw: the destination is also an input,
; so lowering uses a tail-undisturbed (tu) vsetvli. Restored here: the "ret"
; CHECK line, the "entry:" label, and the closing brace of each function, all
; dropped by the line-mangling extraction of this test file.
define <vscale x 1 x i16> @test_sf_vc_v_vvw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
  ret <vscale x 1 x i16> %0
}

declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)

define <vscale x 2 x i16> @test_sf_vc_v_vvw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
  ret <vscale x 2 x i16> %0
}

declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)

define <vscale x 4 x i16> @test_sf_vc_v_vvw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e8mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
  ret <vscale x 4 x i16> %0
}

declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)

define <vscale x 8 x i16> @test_sf_vc_v_vvw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
  ret <vscale x 8 x i16> %0
}

declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)

define <vscale x 16 x i16> @test_sf_vc_v_vvw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
  ret <vscale x 16 x i16> %0
}

declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)

define <vscale x 32 x i16> @test_sf_vc_v_vvw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e8m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
  ret <vscale x 32 x i16> %0
}

declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.se.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)

define <vscale x 1 x i32> @test_sf_vc_v_vvw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
  ret <vscale x 1 x i32> %0
}

declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)

define <vscale x 2 x i32> @test_sf_vc_v_vvw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e16mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
  ret <vscale x 2 x i32> %0
}

declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)

define <vscale x 4 x i32> @test_sf_vc_v_vvw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
  ret <vscale x 4 x i32> %0
}

declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)

define <vscale x 8 x i32> @test_sf_vc_v_vvw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
  ret <vscale x 8 x i32> %0
}

declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)

define <vscale x 16 x i32> @test_sf_vc_v_vvw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e16m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
  ret <vscale x 16 x i32> %0
}

declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.se.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)

define <vscale x 1 x i64> @test_sf_vc_v_vvw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
  ret <vscale x 1 x i64> %0
}

declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)

define <vscale x 2 x i64> @test_sf_vc_v_vvw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
  ret <vscale x 2 x i64> %0
}

declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)

define <vscale x 4 x i64> @test_sf_vc_v_vvw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
  ret <vscale x 4 x i64> %0
}

declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)

define <vscale x 8 x i64> @test_sf_vc_v_vvw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_se_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
  ret <vscale x 8 x i64> %0
}

declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.se.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
; Non-".se" value-returning forms of sf.vc.v.vvw (no side-effect modeling).
; Restored here: the "ret" CHECK line, the "entry:" label, and the closing
; brace of each function, all dropped by the line-mangling extraction of this
; test file.
define <vscale x 1 x i16> @test_sf_vc_v_vvw_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, <vscale x 1 x i8> %vs1, iXLen %vl)
  ret <vscale x 1 x i16> %0
}

declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.vvw.nxv1i16.iXLen.nxv1i8.nxv1i8.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, iXLen)

define <vscale x 2 x i16> @test_sf_vc_v_vvw_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e8mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, <vscale x 2 x i8> %vs1, iXLen %vl)
  ret <vscale x 2 x i16> %0
}

declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.vvw.nxv2i16.iXLen.nxv2i8.nxv2i8.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, <vscale x 2 x i8>, iXLen)

define <vscale x 4 x i16> @test_sf_vc_v_vvw_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e8mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, <vscale x 4 x i8> %vs1, iXLen %vl)
  ret <vscale x 4 x i16> %0
}

declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.vvw.nxv4i16.iXLen.nxv4i8.nxv4i8.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, <vscale x 4 x i8>, iXLen)

define <vscale x 8 x i16> @test_sf_vc_v_vvw_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e8m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, <vscale x 8 x i8> %vs1, iXLen %vl)
  ret <vscale x 8 x i16> %0
}

declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.vvw.nxv8i16.iXLen.nxv8i8.nxv8i8.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, <vscale x 8 x i8>, iXLen)

define <vscale x 16 x i16> @test_sf_vc_v_vvw_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e8m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, <vscale x 16 x i8> %vs1, iXLen %vl)
  ret <vscale x 16 x i16> %0
}

declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.vvw.nxv16i16.iXLen.nxv16i8.nxv16i8.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, <vscale x 16 x i8>, iXLen)

define <vscale x 32 x i16> @test_sf_vc_v_vvw_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e8m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, <vscale x 32 x i8> %vs1, iXLen %vl)
  ret <vscale x 32 x i16> %0
}

declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.vvw.nxv32i16.iXLen.nxv32i8.nxv32i8.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, <vscale x 32 x i8>, iXLen)

define <vscale x 1 x i32> @test_sf_vc_v_vvw_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, <vscale x 1 x i16> %vs1, iXLen %vl)
  ret <vscale x 1 x i32> %0
}

declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.vvw.nxv1i32.iXLen.nxv1i16.nxv1i16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, <vscale x 1 x i16>, iXLen)

define <vscale x 2 x i32> @test_sf_vc_v_vvw_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e16mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, <vscale x 2 x i16> %vs1, iXLen %vl)
  ret <vscale x 2 x i32> %0
}

declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.vvw.nxv2i32.iXLen.nxv2i16.nxv2i16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, <vscale x 2 x i16>, iXLen)

define <vscale x 4 x i32> @test_sf_vc_v_vvw_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e16m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, <vscale x 4 x i16> %vs1, iXLen %vl)
  ret <vscale x 4 x i32> %0
}

declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.vvw.nxv4i32.iXLen.nxv4i16.nxv4i16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, <vscale x 4 x i16>, iXLen)

define <vscale x 8 x i32> @test_sf_vc_v_vvw_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e16m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, <vscale x 8 x i16> %vs1, iXLen %vl)
  ret <vscale x 8 x i32> %0
}

declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.vvw.nxv8i32.iXLen.nxv8i16.nxv8i16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, <vscale x 8 x i16>, iXLen)

define <vscale x 16 x i32> @test_sf_vc_v_vvw_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e16m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, <vscale x 16 x i16> %vs1, iXLen %vl)
  ret <vscale x 16 x i32> %0
}

declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.vvw.nxv16i32.iXLen.nxv16i16.nxv16i16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, <vscale x 16 x i16>, iXLen)

define <vscale x 1 x i64> @test_sf_vc_v_vvw_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, <vscale x 1 x i32> %vs1, iXLen %vl)
  ret <vscale x 1 x i64> %0
}

declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.vvw.nxv1i64.iXLen.nxv1i32.nxv1i32.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i32>, iXLen)

define <vscale x 2 x i64> @test_sf_vc_v_vvw_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v10, v11
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, <vscale x 2 x i32> %vs1, iXLen %vl)
  ret <vscale x 2 x i64> %0
}

declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.vvw.nxv2i64.iXLen.nxv2i32.nxv2i32.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, <vscale x 2 x i32>, iXLen)

define <vscale x 4 x i64> @test_sf_vc_v_vvw_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v12, v14
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, <vscale x 4 x i32> %vs1, iXLen %vl)
  ret <vscale x 4 x i64> %0
}

declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.vvw.nxv4i64.iXLen.nxv4i32.nxv4i32.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, <vscale x 4 x i32>, iXLen)

define <vscale x 8 x i64> @test_sf_vc_v_vvw_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_v_vvw_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.vvw 3, v8, v16, v20
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, <vscale x 8 x i32> %vs1, iXLen %vl)
  ret <vscale x 8 x i64> %0
}

declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.vvw.nxv8i64.iXLen.nxv8i32.nxv8i32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, <vscale x 8 x i32>, iXLen)
; Void (".se") forms of sf.vc.xvw with a GPR scalar operand: %rs1 lands in a0,
; so %vl moves to a1 in the vsetvli. Restored here: the "ret" CHECK line, the
; "entry:" label, "ret void", and the closing brace of each function, plus the
; final function's trailing declare (its signature is fixed by the call site),
; all dropped by the line-mangling extraction of this test file.
define void @test_sf_vc_xvw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvw_se_e8mf8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i16.nxv1i8.i8.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i16.nxv1i8.i8.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, i8, iXLen)

define void @test_sf_vc_xvw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvw_se_e8mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i16.nxv2i8.i8.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i16.nxv2i8.i8.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, i8, iXLen)

define void @test_sf_vc_xvw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvw_se_e8mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i16.nxv4i8.i8.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i16.nxv4i8.i8.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, i8, iXLen)

define void @test_sf_vc_xvw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvw_se_e8m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v10, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i16.nxv8i8.i8.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i16.nxv8i8.i8.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, i8, iXLen)

define void @test_sf_vc_xvw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvw_se_e8m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v12, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i16.nxv16i8.i8.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i16.nxv16i8.i8.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, i8, iXLen)

define void @test_sf_vc_xvw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_sf_vc_xvw_se_e8m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v16, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv32i16.nxv32i8.i8.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv32i16.nxv32i8.i8.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, i8, iXLen)
668 declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv32i16.nxv32i8.i8.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, i8, iXLen)
670 define void @test_sf_vc_xvw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
671 ; CHECK-LABEL: test_sf_vc_xvw_se_e16mf4:
672 ; CHECK: # %bb.0: # %entry
673 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
674 ; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
677 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i32.nxv1i16.i16.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
681 declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i32.nxv1i16.i16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, i16, iXLen)
683 define void @test_sf_vc_xvw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
684 ; CHECK-LABEL: test_sf_vc_xvw_se_e16mf2:
685 ; CHECK: # %bb.0: # %entry
686 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
687 ; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
690 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i32.nxv2i16.i16.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
694 declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i32.nxv2i16.i16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, i16, iXLen)
696 define void @test_sf_vc_xvw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
697 ; CHECK-LABEL: test_sf_vc_xvw_se_e16m1:
698 ; CHECK: # %bb.0: # %entry
699 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
700 ; CHECK-NEXT: sf.vc.xvw 3, v8, v10, a0
703 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i32.nxv4i16.i16.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
707 declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i32.nxv4i16.i16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, i16, iXLen)
709 define void @test_sf_vc_xvw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
710 ; CHECK-LABEL: test_sf_vc_xvw_se_e16m2:
711 ; CHECK: # %bb.0: # %entry
712 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
713 ; CHECK-NEXT: sf.vc.xvw 3, v8, v12, a0
716 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i32.nxv8i16.i16.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
720 declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i32.nxv8i16.i16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, i16, iXLen)
722 define void @test_sf_vc_xvw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
723 ; CHECK-LABEL: test_sf_vc_xvw_se_e16m4:
724 ; CHECK: # %bb.0: # %entry
725 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
726 ; CHECK-NEXT: sf.vc.xvw 3, v8, v16, a0
729 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i32.nxv16i16.i16.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
733 declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16i32.nxv16i16.i16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, i16, iXLen)
735 define void @test_sf_vc_xvw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
736 ; CHECK-LABEL: test_sf_vc_xvw_se_e32mf2:
737 ; CHECK: # %bb.0: # %entry
738 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
739 ; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
742 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i64.nxv1i32.i32.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
746 declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1i64.nxv1i32.i32.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, i32, iXLen)
748 define void @test_sf_vc_xvw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
749 ; CHECK-LABEL: test_sf_vc_xvw_se_e32m1:
750 ; CHECK: # %bb.0: # %entry
751 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
752 ; CHECK-NEXT: sf.vc.xvw 3, v8, v10, a0
755 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i64.nxv2i32.i32.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
759 declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2i64.nxv2i32.i32.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, i32, iXLen)
761 define void @test_sf_vc_xvw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
762 ; CHECK-LABEL: test_sf_vc_xvw_se_e32m2:
763 ; CHECK: # %bb.0: # %entry
764 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
765 ; CHECK-NEXT: sf.vc.xvw 3, v8, v12, a0
768 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i64.nxv4i32.i32.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
772 declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4i64.nxv4i32.i32.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, i32, iXLen)
774 define void @test_sf_vc_xvw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
775 ; CHECK-LABEL: test_sf_vc_xvw_se_e32m4:
776 ; CHECK: # %bb.0: # %entry
777 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
778 ; CHECK-NEXT: sf.vc.xvw 3, v8, v16, a0
781 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i64.nxv8i32.i32.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
785 declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8i64.nxv8i32.i32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, i32, iXLen)
787 define <vscale x 1 x i16> @test_sf_vc_v_xvw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
788 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf8:
789 ; CHECK: # %bb.0: # %entry
790 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
791 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
794 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
795 ret <vscale x 1 x i16> %0
798 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, i8, iXLen)
800 define <vscale x 2 x i16> @test_sf_vc_v_xvw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
801 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf4:
802 ; CHECK: # %bb.0: # %entry
803 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
804 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
807 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
808 ret <vscale x 2 x i16> %0
811 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, i8, iXLen)
813 define <vscale x 4 x i16> @test_sf_vc_v_xvw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
814 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8mf2:
815 ; CHECK: # %bb.0: # %entry
816 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
817 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
820 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
821 ret <vscale x 4 x i16> %0
824 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, i8, iXLen)
826 define <vscale x 8 x i16> @test_sf_vc_v_xvw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
827 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m1:
828 ; CHECK: # %bb.0: # %entry
829 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma
830 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
833 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
834 ret <vscale x 8 x i16> %0
837 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, i8, iXLen)
839 define <vscale x 16 x i16> @test_sf_vc_v_xvw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
840 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m2:
841 ; CHECK: # %bb.0: # %entry
842 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma
843 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
846 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
847 ret <vscale x 16 x i16> %0
850 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, i8, iXLen)
852 define <vscale x 32 x i16> @test_sf_vc_v_xvw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
853 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e8m4:
854 ; CHECK: # %bb.0: # %entry
855 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma
856 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0
859 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
860 ret <vscale x 32 x i16> %0
863 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.se.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, i8, iXLen)
865 define <vscale x 1 x i32> @test_sf_vc_v_xvw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
866 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16mf4:
867 ; CHECK: # %bb.0: # %entry
868 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
869 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
872 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
873 ret <vscale x 1 x i32> %0
876 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, i16, iXLen)
878 define <vscale x 2 x i32> @test_sf_vc_v_xvw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
879 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16mf2:
880 ; CHECK: # %bb.0: # %entry
881 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma
882 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
885 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
886 ret <vscale x 2 x i32> %0
889 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, i16, iXLen)
891 define <vscale x 4 x i32> @test_sf_vc_v_xvw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
892 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m1:
893 ; CHECK: # %bb.0: # %entry
894 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
895 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
898 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
899 ret <vscale x 4 x i32> %0
902 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, i16, iXLen)
904 define <vscale x 8 x i32> @test_sf_vc_v_xvw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
905 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m2:
906 ; CHECK: # %bb.0: # %entry
907 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma
908 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
911 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
912 ret <vscale x 8 x i32> %0
915 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, i16, iXLen)
917 define <vscale x 16 x i32> @test_sf_vc_v_xvw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
918 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e16m4:
919 ; CHECK: # %bb.0: # %entry
920 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma
921 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0
924 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
925 ret <vscale x 16 x i32> %0
928 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.se.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, i16, iXLen)
930 define <vscale x 1 x i64> @test_sf_vc_v_xvw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
931 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32mf2:
932 ; CHECK: # %bb.0: # %entry
933 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
934 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
937 %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i32.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
938 ret <vscale x 1 x i64> %0
941 declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv1i64.i32.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, i32, iXLen)
943 define <vscale x 2 x i64> @test_sf_vc_v_xvw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
944 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m1:
945 ; CHECK: # %bb.0: # %entry
946 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
947 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
950 %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i32.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
951 ret <vscale x 2 x i64> %0
954 declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv2i64.i32.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, i32, iXLen)
956 define <vscale x 4 x i64> @test_sf_vc_v_xvw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
957 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m2:
958 ; CHECK: # %bb.0: # %entry
959 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
960 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
963 %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i32.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
964 ret <vscale x 4 x i64> %0
967 declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv4i64.i32.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, i32, iXLen)
969 define <vscale x 8 x i64> @test_sf_vc_v_xvw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
970 ; CHECK-LABEL: test_sf_vc_v_xvw_se_e32m4:
971 ; CHECK: # %bb.0: # %entry
972 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma
973 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0
976 %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i32.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
977 ret <vscale x 8 x i64> %0
980 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.se.nxv8i64.i32.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, i32, iXLen)
982 define <vscale x 1 x i16> @test_sf_vc_v_xvw_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
983 ; CHECK-LABEL: test_sf_vc_v_xvw_e8mf8:
984 ; CHECK: # %bb.0: # %entry
985 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
986 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
989 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, i8 %rs1, iXLen %vl)
990 ret <vscale x 1 x i16> %0
993 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.xvw.nxv1i16.iXLen.nxv1i8.i8.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, i8, iXLen)
995 define <vscale x 2 x i16> @test_sf_vc_v_xvw_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
996 ; CHECK-LABEL: test_sf_vc_v_xvw_e8mf4:
997 ; CHECK: # %bb.0: # %entry
998 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, ma
999 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
1002 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, i8 %rs1, iXLen %vl)
1003 ret <vscale x 2 x i16> %0
1006 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.xvw.nxv2i16.iXLen.nxv2i8.i8.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, i8, iXLen)
1008 define <vscale x 4 x i16> @test_sf_vc_v_xvw_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1009 ; CHECK-LABEL: test_sf_vc_v_xvw_e8mf2:
1010 ; CHECK: # %bb.0: # %entry
1011 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, ma
1012 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
1015 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, i8 %rs1, iXLen %vl)
1016 ret <vscale x 4 x i16> %0
1019 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.xvw.nxv4i16.iXLen.nxv4i8.i8.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, i8, iXLen)
1021 define <vscale x 8 x i16> @test_sf_vc_v_xvw_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1022 ; CHECK-LABEL: test_sf_vc_v_xvw_e8m1:
1023 ; CHECK: # %bb.0: # %entry
1024 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, ma
1025 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
1028 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, i8 %rs1, iXLen %vl)
1029 ret <vscale x 8 x i16> %0
1032 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.xvw.nxv8i16.iXLen.nxv8i8.i8.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, i8, iXLen)
1034 define <vscale x 16 x i16> @test_sf_vc_v_xvw_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1035 ; CHECK-LABEL: test_sf_vc_v_xvw_e8m2:
1036 ; CHECK: # %bb.0: # %entry
1037 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, tu, ma
1038 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
1041 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, i8 %rs1, iXLen %vl)
1042 ret <vscale x 16 x i16> %0
1045 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.xvw.nxv16i16.iXLen.nxv16i8.i8.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, i8, iXLen)
1047 define <vscale x 32 x i16> @test_sf_vc_v_xvw_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 zeroext %rs1, iXLen %vl) {
1048 ; CHECK-LABEL: test_sf_vc_v_xvw_e8m4:
1049 ; CHECK: # %bb.0: # %entry
1050 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, tu, ma
1051 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0
1054 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, i8 %rs1, iXLen %vl)
1055 ret <vscale x 32 x i16> %0
1058 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.xvw.nxv32i16.iXLen.nxv32i8.i8.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, i8, iXLen)
1060 define <vscale x 1 x i32> @test_sf_vc_v_xvw_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1061 ; CHECK-LABEL: test_sf_vc_v_xvw_e16mf4:
1062 ; CHECK: # %bb.0: # %entry
1063 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
1064 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
1067 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, i16 %rs1, iXLen %vl)
1068 ret <vscale x 1 x i32> %0
1071 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.xvw.nxv1i32.iXLen.nxv1i16.i16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, i16, iXLen)
1073 define <vscale x 2 x i32> @test_sf_vc_v_xvw_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1074 ; CHECK-LABEL: test_sf_vc_v_xvw_e16mf2:
1075 ; CHECK: # %bb.0: # %entry
1076 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma
1077 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
1080 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, i16 %rs1, iXLen %vl)
1081 ret <vscale x 2 x i32> %0
1084 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.xvw.nxv2i32.iXLen.nxv2i16.i16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, i16, iXLen)
1086 define <vscale x 4 x i32> @test_sf_vc_v_xvw_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1087 ; CHECK-LABEL: test_sf_vc_v_xvw_e16m1:
1088 ; CHECK: # %bb.0: # %entry
1089 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
1090 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
1093 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, i16 %rs1, iXLen %vl)
1094 ret <vscale x 4 x i32> %0
1097 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.xvw.nxv4i32.iXLen.nxv4i16.i16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, i16, iXLen)
1099 define <vscale x 8 x i32> @test_sf_vc_v_xvw_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1100 ; CHECK-LABEL: test_sf_vc_v_xvw_e16m2:
1101 ; CHECK: # %bb.0: # %entry
1102 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, ma
1103 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
1106 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, i16 %rs1, iXLen %vl)
1107 ret <vscale x 8 x i32> %0
1110 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.xvw.nxv8i32.iXLen.nxv8i16.i16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, i16, iXLen)
1112 define <vscale x 16 x i32> @test_sf_vc_v_xvw_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 zeroext %rs1, iXLen %vl) {
1113 ; CHECK-LABEL: test_sf_vc_v_xvw_e16m4:
1114 ; CHECK: # %bb.0: # %entry
1115 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, tu, ma
1116 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0
1119 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, i16 %rs1, iXLen %vl)
1120 ret <vscale x 16 x i32> %0
1123 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.xvw.nxv16i32.iXLen.nxv16i16.i16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, i16, iXLen)
1125 define <vscale x 1 x i64> @test_sf_vc_v_xvw_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1126 ; CHECK-LABEL: test_sf_vc_v_xvw_e32mf2:
1127 ; CHECK: # %bb.0: # %entry
1128 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
1129 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v9, a0
1132 %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.nxv1i64.iXLen.nxv1i32.i32.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, i32 %rs1, iXLen %vl)
1133 ret <vscale x 1 x i64> %0
1136 declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.xvw.nxv1i64.iXLen.nxv1i32.i32.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, i32, iXLen)
1138 define <vscale x 2 x i64> @test_sf_vc_v_xvw_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1139 ; CHECK-LABEL: test_sf_vc_v_xvw_e32m1:
1140 ; CHECK: # %bb.0: # %entry
1141 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, ma
1142 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v10, a0
1145 %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.nxv2i64.iXLen.nxv2i32.i32.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, i32 %rs1, iXLen %vl)
1146 ret <vscale x 2 x i64> %0
1149 declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.xvw.nxv2i64.iXLen.nxv2i32.i32.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, i32, iXLen)
1151 define <vscale x 4 x i64> @test_sf_vc_v_xvw_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1152 ; CHECK-LABEL: test_sf_vc_v_xvw_e32m2:
1153 ; CHECK: # %bb.0: # %entry
1154 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, ma
1155 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v12, a0
1158 %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.nxv4i64.iXLen.nxv4i32.i32.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, i32 %rs1, iXLen %vl)
1159 ret <vscale x 4 x i64> %0
1162 declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.xvw.nxv4i64.iXLen.nxv4i32.i32.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, i32, iXLen)
1164 define <vscale x 8 x i64> @test_sf_vc_v_xvw_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 signext %rs1, iXLen %vl) {
1165 ; CHECK-LABEL: test_sf_vc_v_xvw_e32m4:
1166 ; CHECK: # %bb.0: # %entry
1167 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, ma
1168 ; CHECK-NEXT: sf.vc.v.xvw 3, v8, v16, a0
1171 %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.nxv8i64.iXLen.nxv8i32.i32.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, i32 %rs1, iXLen %vl)
1172 ret <vscale x 8 x i64> %0
1175 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.xvw.nxv8i64.iXLen.nxv8i32.i32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, i32, iXLen)
1177 define void @test_sf_vc_ivw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
1178 ; CHECK-LABEL: test_sf_vc_ivw_se_e8mf8:
1179 ; CHECK: # %bb.0: # %entry
1180 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
1181 ; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
1184 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i16.nxv1i8.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
1188 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i16.nxv1i8.iXLen.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, iXLen, iXLen)
1190 define void @test_sf_vc_ivw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
1191 ; CHECK-LABEL: test_sf_vc_ivw_se_e8mf4:
1192 ; CHECK: # %bb.0: # %entry
1193 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
1194 ; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
1197 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i16.nxv2i8.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
1201 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i16.nxv2i8.iXLen.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, iXLen, iXLen)
1203 define void @test_sf_vc_ivw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
1204 ; CHECK-LABEL: test_sf_vc_ivw_se_e8mf2:
1205 ; CHECK: # %bb.0: # %entry
1206 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
1207 ; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
1210 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i16.nxv4i8.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
1214 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i16.nxv4i8.iXLen.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, iXLen, iXLen)
1216 define void @test_sf_vc_ivw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
1217 ; CHECK-LABEL: test_sf_vc_ivw_se_e8m1:
1218 ; CHECK: # %bb.0: # %entry
1219 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
1220 ; CHECK-NEXT: sf.vc.ivw 3, v8, v10, 10
1223 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i16.nxv8i8.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
1227 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i16.nxv8i8.iXLen.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, iXLen, iXLen)
1229 define void @test_sf_vc_ivw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
1230 ; CHECK-LABEL: test_sf_vc_ivw_se_e8m2:
1231 ; CHECK: # %bb.0: # %entry
1232 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
1233 ; CHECK-NEXT: sf.vc.ivw 3, v8, v12, 10
1236 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i16.nxv16i8.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
; ---------------------------------------------------------------------------
; sf.vc.ivw.se tests: side-effecting widening custom ops taking a 5-bit
; immediate (here 10) and opcode selector 3.  Each test pins the expected
; vsetvli (SEW/LMUL, ta, ma) and the emitted sf.vc.ivw encoding per CHECK.
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py;
; regenerate rather than hand-editing them.
; ---------------------------------------------------------------------------
1240 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i16.nxv16i8.iXLen.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, iXLen, iXLen)
1242 define void @test_sf_vc_ivw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
1243 ; CHECK-LABEL: test_sf_vc_ivw_se_e8m4:
1244 ; CHECK: # %bb.0: # %entry
1245 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
1246 ; CHECK-NEXT: sf.vc.ivw 3, v8, v16, 10
1249 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv32i16.nxv32i8.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
1253 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv32i16.nxv32i8.iXLen.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, iXLen, iXLen)
1255 define void @test_sf_vc_ivw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
1256 ; CHECK-LABEL: test_sf_vc_ivw_se_e16mf4:
1257 ; CHECK: # %bb.0: # %entry
1258 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
1259 ; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
1262 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i32.nxv1i16.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
1266 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i32.nxv1i16.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, iXLen, iXLen)
1268 define void @test_sf_vc_ivw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
1269 ; CHECK-LABEL: test_sf_vc_ivw_se_e16mf2:
1270 ; CHECK: # %bb.0: # %entry
1271 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
1272 ; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
1275 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i32.nxv2i16.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
1279 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i32.nxv2i16.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, iXLen, iXLen)
1281 define void @test_sf_vc_ivw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
1282 ; CHECK-LABEL: test_sf_vc_ivw_se_e16m1:
1283 ; CHECK: # %bb.0: # %entry
1284 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
1285 ; CHECK-NEXT: sf.vc.ivw 3, v8, v10, 10
1288 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i32.nxv4i16.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
1292 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i32.nxv4i16.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, iXLen, iXLen)
1294 define void @test_sf_vc_ivw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
1295 ; CHECK-LABEL: test_sf_vc_ivw_se_e16m2:
1296 ; CHECK: # %bb.0: # %entry
1297 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
1298 ; CHECK-NEXT: sf.vc.ivw 3, v8, v12, 10
1301 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i32.nxv8i16.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
1305 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i32.nxv8i16.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, iXLen, iXLen)
1307 define void @test_sf_vc_ivw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
1308 ; CHECK-LABEL: test_sf_vc_ivw_se_e16m4:
1309 ; CHECK: # %bb.0: # %entry
1310 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
1311 ; CHECK-NEXT: sf.vc.ivw 3, v8, v16, 10
1314 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i32.nxv16i16.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
1318 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16i32.nxv16i16.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, iXLen, iXLen)
1320 define void @test_sf_vc_ivw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
1321 ; CHECK-LABEL: test_sf_vc_ivw_se_e32mf2:
1322 ; CHECK: # %bb.0: # %entry
1323 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
1324 ; CHECK-NEXT: sf.vc.ivw 3, v8, v9, 10
1327 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i64.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
1331 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1i64.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, iXLen, iXLen)
1333 define void @test_sf_vc_ivw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
1334 ; CHECK-LABEL: test_sf_vc_ivw_se_e32m1:
1335 ; CHECK: # %bb.0: # %entry
1336 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
1337 ; CHECK-NEXT: sf.vc.ivw 3, v8, v10, 10
1340 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i64.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
1344 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2i64.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, iXLen, iXLen)
1346 define void @test_sf_vc_ivw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
1347 ; CHECK-LABEL: test_sf_vc_ivw_se_e32m2:
1348 ; CHECK: # %bb.0: # %entry
1349 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
1350 ; CHECK-NEXT: sf.vc.ivw 3, v8, v12, 10
1353 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i64.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
1357 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4i64.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, iXLen, iXLen)
1359 define void @test_sf_vc_ivw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
1360 ; CHECK-LABEL: test_sf_vc_ivw_se_e32m4:
1361 ; CHECK: # %bb.0: # %entry
1362 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
1363 ; CHECK-NEXT: sf.vc.ivw 3, v8, v16, 10
1366 tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i64.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
1370 declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8i64.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, iXLen, iXLen)
; ---------------------------------------------------------------------------
; sf.vc.v.ivw.se tests: same immediate-operand widening ops, but returning a
; vector result into %vd (intrinsic returns <vscale x N x iW>).  The CHECK
; lines expect tail-undisturbed (tu) policy in the vsetvli, unlike the
; void sf.vc.ivw.se tests above which use ta.
; ---------------------------------------------------------------------------
1372 define <vscale x 1 x i16> @test_sf_vc_v_ivw_se_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
1373 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf8:
1374 ; CHECK: # %bb.0: # %entry
1375 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
1376 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1379 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
1380 ret <vscale x 1 x i16> %0
1383 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, iXLen, iXLen)
1385 define <vscale x 2 x i16> @test_sf_vc_v_ivw_se_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
1386 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf4:
1387 ; CHECK: # %bb.0: # %entry
1388 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
1389 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1392 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
1393 ret <vscale x 2 x i16> %0
1396 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, iXLen, iXLen)
1398 define <vscale x 4 x i16> @test_sf_vc_v_ivw_se_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
1399 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8mf2:
1400 ; CHECK: # %bb.0: # %entry
1401 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
1402 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1405 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
1406 ret <vscale x 4 x i16> %0
1409 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, iXLen, iXLen)
1411 define <vscale x 8 x i16> @test_sf_vc_v_ivw_se_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
1412 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m1:
1413 ; CHECK: # %bb.0: # %entry
1414 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
1415 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
1418 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
1419 ret <vscale x 8 x i16> %0
1422 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, iXLen, iXLen)
1424 define <vscale x 16 x i16> @test_sf_vc_v_ivw_se_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
1425 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m2:
1426 ; CHECK: # %bb.0: # %entry
1427 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
1428 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
1431 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
1432 ret <vscale x 16 x i16> %0
1435 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, iXLen, iXLen)
1437 define <vscale x 32 x i16> @test_sf_vc_v_ivw_se_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
1438 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e8m4:
1439 ; CHECK: # %bb.0: # %entry
1440 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
1441 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10
1444 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
1445 ret <vscale x 32 x i16> %0
1448 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.se.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, iXLen, iXLen)
1450 define <vscale x 1 x i32> @test_sf_vc_v_ivw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
1451 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16mf4:
1452 ; CHECK: # %bb.0: # %entry
1453 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
1454 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1457 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
1458 ret <vscale x 1 x i32> %0
1461 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, iXLen, iXLen)
1463 define <vscale x 2 x i32> @test_sf_vc_v_ivw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
1464 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16mf2:
1465 ; CHECK: # %bb.0: # %entry
1466 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
1467 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1470 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
1471 ret <vscale x 2 x i32> %0
1474 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, iXLen, iXLen)
1476 define <vscale x 4 x i32> @test_sf_vc_v_ivw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
1477 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m1:
1478 ; CHECK: # %bb.0: # %entry
1479 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
1480 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
1483 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
1484 ret <vscale x 4 x i32> %0
1487 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, iXLen, iXLen)
1489 define <vscale x 8 x i32> @test_sf_vc_v_ivw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
1490 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m2:
1491 ; CHECK: # %bb.0: # %entry
1492 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
1493 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
1496 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
1497 ret <vscale x 8 x i32> %0
1500 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, iXLen, iXLen)
1502 define <vscale x 16 x i32> @test_sf_vc_v_ivw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
1503 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e16m4:
1504 ; CHECK: # %bb.0: # %entry
1505 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
1506 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10
1509 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
1510 ret <vscale x 16 x i32> %0
1513 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.se.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, iXLen, iXLen)
1515 define <vscale x 1 x i64> @test_sf_vc_v_ivw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
1516 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32mf2:
1517 ; CHECK: # %bb.0: # %entry
1518 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
1519 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1522 %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
1523 ret <vscale x 1 x i64> %0
1526 declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, iXLen, iXLen)
1528 define <vscale x 2 x i64> @test_sf_vc_v_ivw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
1529 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m1:
1530 ; CHECK: # %bb.0: # %entry
1531 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
1532 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
1535 %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
1536 ret <vscale x 2 x i64> %0
1539 declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, iXLen, iXLen)
1541 define <vscale x 4 x i64> @test_sf_vc_v_ivw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
1542 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m2:
1543 ; CHECK: # %bb.0: # %entry
1544 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
1545 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
1548 %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
1549 ret <vscale x 4 x i64> %0
1552 declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, iXLen, iXLen)
1554 define <vscale x 8 x i64> @test_sf_vc_v_ivw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
1555 ; CHECK-LABEL: test_sf_vc_v_ivw_se_e32m4:
1556 ; CHECK: # %bb.0: # %entry
1557 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
1558 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10
1561 %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
1562 ret <vscale x 8 x i64> %0
1565 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.se.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, iXLen, iXLen)
; ---------------------------------------------------------------------------
; sf.vc.v.ivw tests: the non-".se" (no side effect modelled) variants of the
; value-returning immediate-operand widening ops.  The expected assembly is
; identical to the ".se" forms above for each SEW/LMUL combination.
; ---------------------------------------------------------------------------
1567 define <vscale x 1 x i16> @test_sf_vc_v_ivw_e8mf8(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, iXLen %vl) {
1568 ; CHECK-LABEL: test_sf_vc_v_ivw_e8mf8:
1569 ; CHECK: # %bb.0: # %entry
1570 ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, ma
1571 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1574 %0 = tail call <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen 3, <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs2, iXLen 10, iXLen %vl)
1575 ret <vscale x 1 x i16> %0
1578 declare <vscale x 1 x i16> @llvm.riscv.sf.vc.v.ivw.nxv1i16.iXLen.nxv1i8.iXLen.iXLen(iXLen, <vscale x 1 x i16>, <vscale x 1 x i8>, iXLen, iXLen)
1580 define <vscale x 2 x i16> @test_sf_vc_v_ivw_e8mf4(<vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, iXLen %vl) {
1581 ; CHECK-LABEL: test_sf_vc_v_ivw_e8mf4:
1582 ; CHECK: # %bb.0: # %entry
1583 ; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, ma
1584 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1587 %0 = tail call <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen 3, <vscale x 2 x i16> %vd, <vscale x 2 x i8> %vs2, iXLen 10, iXLen %vl)
1588 ret <vscale x 2 x i16> %0
1591 declare <vscale x 2 x i16> @llvm.riscv.sf.vc.v.ivw.nxv2i16.iXLen.nxv2i8.iXLen.iXLen(iXLen, <vscale x 2 x i16>, <vscale x 2 x i8>, iXLen, iXLen)
1593 define <vscale x 4 x i16> @test_sf_vc_v_ivw_e8mf2(<vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, iXLen %vl) {
1594 ; CHECK-LABEL: test_sf_vc_v_ivw_e8mf2:
1595 ; CHECK: # %bb.0: # %entry
1596 ; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
1597 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1600 %0 = tail call <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen 3, <vscale x 4 x i16> %vd, <vscale x 4 x i8> %vs2, iXLen 10, iXLen %vl)
1601 ret <vscale x 4 x i16> %0
1604 declare <vscale x 4 x i16> @llvm.riscv.sf.vc.v.ivw.nxv4i16.iXLen.nxv4i8.iXLen.iXLen(iXLen, <vscale x 4 x i16>, <vscale x 4 x i8>, iXLen, iXLen)
1606 define <vscale x 8 x i16> @test_sf_vc_v_ivw_e8m1(<vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, iXLen %vl) {
1607 ; CHECK-LABEL: test_sf_vc_v_ivw_e8m1:
1608 ; CHECK: # %bb.0: # %entry
1609 ; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, ma
1610 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
1613 %0 = tail call <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen 3, <vscale x 8 x i16> %vd, <vscale x 8 x i8> %vs2, iXLen 10, iXLen %vl)
1614 ret <vscale x 8 x i16> %0
1617 declare <vscale x 8 x i16> @llvm.riscv.sf.vc.v.ivw.nxv8i16.iXLen.nxv8i8.iXLen.iXLen(iXLen, <vscale x 8 x i16>, <vscale x 8 x i8>, iXLen, iXLen)
1619 define <vscale x 16 x i16> @test_sf_vc_v_ivw_e8m2(<vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, iXLen %vl) {
1620 ; CHECK-LABEL: test_sf_vc_v_ivw_e8m2:
1621 ; CHECK: # %bb.0: # %entry
1622 ; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, ma
1623 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
1626 %0 = tail call <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen 3, <vscale x 16 x i16> %vd, <vscale x 16 x i8> %vs2, iXLen 10, iXLen %vl)
1627 ret <vscale x 16 x i16> %0
1630 declare <vscale x 16 x i16> @llvm.riscv.sf.vc.v.ivw.nxv16i16.iXLen.nxv16i8.iXLen.iXLen(iXLen, <vscale x 16 x i16>, <vscale x 16 x i8>, iXLen, iXLen)
1632 define <vscale x 32 x i16> @test_sf_vc_v_ivw_e8m4(<vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, iXLen %vl) {
1633 ; CHECK-LABEL: test_sf_vc_v_ivw_e8m4:
1634 ; CHECK: # %bb.0: # %entry
1635 ; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, ma
1636 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10
1639 %0 = tail call <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen 3, <vscale x 32 x i16> %vd, <vscale x 32 x i8> %vs2, iXLen 10, iXLen %vl)
1640 ret <vscale x 32 x i16> %0
1643 declare <vscale x 32 x i16> @llvm.riscv.sf.vc.v.ivw.nxv32i16.iXLen.nxv32i8.iXLen.iXLen(iXLen, <vscale x 32 x i16>, <vscale x 32 x i8>, iXLen, iXLen)
1645 define <vscale x 1 x i32> @test_sf_vc_v_ivw_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, iXLen %vl) {
1646 ; CHECK-LABEL: test_sf_vc_v_ivw_e16mf4:
1647 ; CHECK: # %bb.0: # %entry
1648 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
1649 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1652 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen 3, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, iXLen 10, iXLen %vl)
1653 ret <vscale x 1 x i32> %0
1656 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.ivw.nxv1i32.iXLen.nxv1i16.iXLen.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, iXLen, iXLen)
1658 define <vscale x 2 x i32> @test_sf_vc_v_ivw_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, iXLen %vl) {
1659 ; CHECK-LABEL: test_sf_vc_v_ivw_e16mf2:
1660 ; CHECK: # %bb.0: # %entry
1661 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
1662 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1665 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen 3, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, iXLen 10, iXLen %vl)
1666 ret <vscale x 2 x i32> %0
1669 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.ivw.nxv2i32.iXLen.nxv2i16.iXLen.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, iXLen, iXLen)
1671 define <vscale x 4 x i32> @test_sf_vc_v_ivw_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, iXLen %vl) {
1672 ; CHECK-LABEL: test_sf_vc_v_ivw_e16m1:
1673 ; CHECK: # %bb.0: # %entry
1674 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
1675 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
1678 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen 3, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, iXLen 10, iXLen %vl)
1679 ret <vscale x 4 x i32> %0
1682 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.ivw.nxv4i32.iXLen.nxv4i16.iXLen.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, iXLen, iXLen)
1684 define <vscale x 8 x i32> @test_sf_vc_v_ivw_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, iXLen %vl) {
1685 ; CHECK-LABEL: test_sf_vc_v_ivw_e16m2:
1686 ; CHECK: # %bb.0: # %entry
1687 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
1688 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
1691 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen 3, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, iXLen 10, iXLen %vl)
1692 ret <vscale x 8 x i32> %0
1695 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.ivw.nxv8i32.iXLen.nxv8i16.iXLen.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, iXLen, iXLen)
1697 define <vscale x 16 x i32> @test_sf_vc_v_ivw_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, iXLen %vl) {
1698 ; CHECK-LABEL: test_sf_vc_v_ivw_e16m4:
1699 ; CHECK: # %bb.0: # %entry
1700 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
1701 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10
1704 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen 3, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, iXLen 10, iXLen %vl)
1705 ret <vscale x 16 x i32> %0
1708 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.ivw.nxv16i32.iXLen.nxv16i16.iXLen.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, iXLen, iXLen)
1710 define <vscale x 1 x i64> @test_sf_vc_v_ivw_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, iXLen %vl) {
1711 ; CHECK-LABEL: test_sf_vc_v_ivw_e32mf2:
1712 ; CHECK: # %bb.0: # %entry
1713 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
1714 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v9, 10
1717 %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen 3, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, iXLen 10, iXLen %vl)
1718 ret <vscale x 1 x i64> %0
1721 declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.ivw.nxv1i64.iXLen.nxv1i32.iXLen.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, iXLen, iXLen)
1723 define <vscale x 2 x i64> @test_sf_vc_v_ivw_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, iXLen %vl) {
1724 ; CHECK-LABEL: test_sf_vc_v_ivw_e32m1:
1725 ; CHECK: # %bb.0: # %entry
1726 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
1727 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v10, 10
1730 %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen 3, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, iXLen 10, iXLen %vl)
1731 ret <vscale x 2 x i64> %0
1734 declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.ivw.nxv2i64.iXLen.nxv2i32.iXLen.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, iXLen, iXLen)
1736 define <vscale x 4 x i64> @test_sf_vc_v_ivw_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, iXLen %vl) {
1737 ; CHECK-LABEL: test_sf_vc_v_ivw_e32m2:
1738 ; CHECK: # %bb.0: # %entry
1739 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
1740 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v12, 10
1743 %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen 3, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, iXLen 10, iXLen %vl)
1744 ret <vscale x 4 x i64> %0
1747 declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.ivw.nxv4i64.iXLen.nxv4i32.iXLen.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, iXLen, iXLen)
1749 define <vscale x 8 x i64> @test_sf_vc_v_ivw_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, iXLen %vl) {
1750 ; CHECK-LABEL: test_sf_vc_v_ivw_e32m4:
1751 ; CHECK: # %bb.0: # %entry
1752 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
1753 ; CHECK-NEXT: sf.vc.v.ivw 3, v8, v16, 10
1756 %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen 3, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, iXLen 10, iXLen %vl)
1757 ret <vscale x 8 x i64> %0
1760 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.ivw.nxv8i64.iXLen.nxv8i32.iXLen.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, iXLen, iXLen)
; ---------------------------------------------------------------------------
; sf.vc.fvw.se tests: side-effecting widening custom ops with a scalar FP
; operand in fa0 (half for e16 tests, float for e32 tests) and opcode
; selector 1.  Half-precision tests rely on +zvfh from the RUN lines.
; ---------------------------------------------------------------------------
1762 define void @test_sf_vc_fvw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
1763 ; CHECK-LABEL: test_sf_vc_fvw_se_e16mf4:
1764 ; CHECK: # %bb.0: # %entry
1765 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
1766 ; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
1769 tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i32.nxv1i16.f16.iXLen(iXLen 1, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
1773 declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i32.nxv1i16.f16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, half, iXLen)
1775 define void @test_sf_vc_fvw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
1776 ; CHECK-LABEL: test_sf_vc_fvw_se_e16mf2:
1777 ; CHECK: # %bb.0: # %entry
1778 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
1779 ; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
1782 tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i32.nxv2i16.f16.iXLen(iXLen 1, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
1786 declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i32.nxv2i16.f16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, half, iXLen)
1788 define void @test_sf_vc_fvw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
1789 ; CHECK-LABEL: test_sf_vc_fvw_se_e16m1:
1790 ; CHECK: # %bb.0: # %entry
1791 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
1792 ; CHECK-NEXT: sf.vc.fvw 1, v8, v10, fa0
1795 tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i32.nxv4i16.f16.iXLen(iXLen 1, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
1799 declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i32.nxv4i16.f16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, half, iXLen)
1801 define void @test_sf_vc_fvw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
1802 ; CHECK-LABEL: test_sf_vc_fvw_se_e16m2:
1803 ; CHECK: # %bb.0: # %entry
1804 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
1805 ; CHECK-NEXT: sf.vc.fvw 1, v8, v12, fa0
1808 tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i32.nxv8i16.f16.iXLen(iXLen 1, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
1812 declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i32.nxv8i16.f16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, half, iXLen)
1814 define void @test_sf_vc_fvw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
1815 ; CHECK-LABEL: test_sf_vc_fvw_se_e16m4:
1816 ; CHECK: # %bb.0: # %entry
1817 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
1818 ; CHECK-NEXT: sf.vc.fvw 1, v8, v16, fa0
1821 tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16i32.nxv16i16.f16.iXLen(iXLen 1, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
1825 declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16i32.nxv16i16.f16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, half, iXLen)
1827 define void @test_sf_vc_fvw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
1828 ; CHECK-LABEL: test_sf_vc_fvw_se_e32mf2:
1829 ; CHECK: # %bb.0: # %entry
1830 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
1831 ; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
1834 tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i64.nxv1i32.f32.iXLen(iXLen 1, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
1838 declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1i64.nxv1i32.f32.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, float, iXLen)
1840 define void @test_sf_vc_fvw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
1841 ; CHECK-LABEL: test_sf_vc_fvw_se_e32m1:
1842 ; CHECK: # %bb.0: # %entry
1843 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
1844 ; CHECK-NEXT: sf.vc.fvw 1, v8, v10, fa0
1847 tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i64.nxv2i32.f32.iXLen(iXLen 1, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
1851 declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2i64.nxv2i32.f32.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, float, iXLen)
1853 define void @test_sf_vc_fvw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
1854 ; CHECK-LABEL: test_sf_vc_fvw_se_e32m2:
1855 ; CHECK: # %bb.0: # %entry
1856 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
1857 ; CHECK-NEXT: sf.vc.fvw 1, v8, v12, fa0
1860 tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i64.nxv4i32.f32.iXLen(iXLen 1, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
1864 declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4i64.nxv4i32.f32.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, float, iXLen)
1866 define void @test_sf_vc_fvw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
1867 ; CHECK-LABEL: test_sf_vc_fvw_se_e32m4:
1868 ; CHECK: # %bb.0: # %entry
1869 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
1870 ; CHECK-NEXT: sf.vc.fvw 1, v8, v16, fa0
1873 tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i64.nxv8i32.f32.iXLen(iXLen 1, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
1877 declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8i64.nxv8i32.f32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, float, iXLen)
; ---------------------------------------------------------------------------
; sf.vc.v.fvw.se tests: value-returning variants of the FP-scalar widening
; ops.  As with sf.vc.v.ivw.se, the returning forms expect a tu (tail
; undisturbed) vsetvli policy in the autogenerated CHECK lines.
; ---------------------------------------------------------------------------
1879 define <vscale x 1 x i32> @test_sf_vc_v_fvw_se_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
1880 ; CHECK-LABEL: test_sf_vc_v_fvw_se_e16mf4:
1881 ; CHECK: # %bb.0: # %entry
1882 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
1883 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
1886 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen 1, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
1887 ret <vscale x 1 x i32> %0
1890 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, half, iXLen)
1892 define <vscale x 2 x i32> @test_sf_vc_v_fvw_se_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
1893 ; CHECK-LABEL: test_sf_vc_v_fvw_se_e16mf2:
1894 ; CHECK: # %bb.0: # %entry
1895 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
1896 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
1899 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen 1, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
1900 ret <vscale x 2 x i32> %0
1903 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, half, iXLen)
1905 define <vscale x 4 x i32> @test_sf_vc_v_fvw_se_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
1906 ; CHECK-LABEL: test_sf_vc_v_fvw_se_e16m1:
1907 ; CHECK: # %bb.0: # %entry
1908 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
1909 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0
1912 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen 1, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
1913 ret <vscale x 4 x i32> %0
1916 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, half, iXLen)
1918 define <vscale x 8 x i32> @test_sf_vc_v_fvw_se_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
1919 ; CHECK-LABEL: test_sf_vc_v_fvw_se_e16m2:
1920 ; CHECK: # %bb.0: # %entry
1921 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
1922 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0
1925 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen 1, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
1926 ret <vscale x 8 x i32> %0
1929 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, half, iXLen)
1931 define <vscale x 16 x i32> @test_sf_vc_v_fvw_se_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
1932 ; CHECK-LABEL: test_sf_vc_v_fvw_se_e16m4:
1933 ; CHECK: # %bb.0: # %entry
1934 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
1935 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v16, fa0
1938 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen 1, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
1939 ret <vscale x 16 x i32> %0
1942 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.se.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, half, iXLen)
1944 define <vscale x 1 x i64> @test_sf_vc_v_fvw_se_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
1945 ; CHECK-LABEL: test_sf_vc_v_fvw_se_e32mf2:
1946 ; CHECK: # %bb.0: # %entry
1947 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
1948 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
1951 %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen 1, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
1952 ret <vscale x 1 x i64> %0
1955 declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, float, iXLen)
1957 define <vscale x 2 x i64> @test_sf_vc_v_fvw_se_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
1958 ; CHECK-LABEL: test_sf_vc_v_fvw_se_e32m1:
1959 ; CHECK: # %bb.0: # %entry
1960 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
1961 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0
1964 %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen 1, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
1965 ret <vscale x 2 x i64> %0
1968 declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, float, iXLen)
1970 define <vscale x 4 x i64> @test_sf_vc_v_fvw_se_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
1971 ; CHECK-LABEL: test_sf_vc_v_fvw_se_e32m2:
1972 ; CHECK: # %bb.0: # %entry
1973 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
1974 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0
1977 %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen 1, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
1978 ret <vscale x 4 x i64> %0
1981 declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, float, iXLen)
1983 define <vscale x 8 x i64> @test_sf_vc_v_fvw_se_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
1984 ; CHECK-LABEL: test_sf_vc_v_fvw_se_e32m4:
1985 ; CHECK: # %bb.0: # %entry
1986 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
1987 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v16, fa0
1990 %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen 1, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
1991 ret <vscale x 8 x i64> %0
1994 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.se.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, float, iXLen)
1996 define <vscale x 1 x i32> @test_sf_vc_v_fvw_e16mf4(<vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl) {
1997 ; CHECK-LABEL: test_sf_vc_v_fvw_e16mf4:
1998 ; CHECK: # %bb.0: # %entry
1999 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
2000 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
2003 %0 = tail call <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen 1, <vscale x 1 x i32> %vd, <vscale x 1 x i16> %vs2, half %fs1, iXLen %vl)
2004 ret <vscale x 1 x i32> %0
2007 declare <vscale x 1 x i32> @llvm.riscv.sf.vc.v.fvw.nxv1i32.iXLen.nxv1i16.f16.iXLen(iXLen, <vscale x 1 x i32>, <vscale x 1 x i16>, half, iXLen)
2009 define <vscale x 2 x i32> @test_sf_vc_v_fvw_e16mf2(<vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl) {
2010 ; CHECK-LABEL: test_sf_vc_v_fvw_e16mf2:
2011 ; CHECK: # %bb.0: # %entry
2012 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
2013 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
2016 %0 = tail call <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen 1, <vscale x 2 x i32> %vd, <vscale x 2 x i16> %vs2, half %fs1, iXLen %vl)
2017 ret <vscale x 2 x i32> %0
2020 declare <vscale x 2 x i32> @llvm.riscv.sf.vc.v.fvw.nxv2i32.iXLen.nxv2i16.f16.iXLen(iXLen, <vscale x 2 x i32>, <vscale x 2 x i16>, half, iXLen)
2022 define <vscale x 4 x i32> @test_sf_vc_v_fvw_e16m1(<vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl) {
2023 ; CHECK-LABEL: test_sf_vc_v_fvw_e16m1:
2024 ; CHECK: # %bb.0: # %entry
2025 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
2026 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0
2029 %0 = tail call <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen 1, <vscale x 4 x i32> %vd, <vscale x 4 x i16> %vs2, half %fs1, iXLen %vl)
2030 ret <vscale x 4 x i32> %0
2033 declare <vscale x 4 x i32> @llvm.riscv.sf.vc.v.fvw.nxv4i32.iXLen.nxv4i16.f16.iXLen(iXLen, <vscale x 4 x i32>, <vscale x 4 x i16>, half, iXLen)
2035 define <vscale x 8 x i32> @test_sf_vc_v_fvw_e16m2(<vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl) {
2036 ; CHECK-LABEL: test_sf_vc_v_fvw_e16m2:
2037 ; CHECK: # %bb.0: # %entry
2038 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
2039 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0
2042 %0 = tail call <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen 1, <vscale x 8 x i32> %vd, <vscale x 8 x i16> %vs2, half %fs1, iXLen %vl)
2043 ret <vscale x 8 x i32> %0
2046 declare <vscale x 8 x i32> @llvm.riscv.sf.vc.v.fvw.nxv8i32.iXLen.nxv8i16.f16.iXLen(iXLen, <vscale x 8 x i32>, <vscale x 8 x i16>, half, iXLen)
2048 define <vscale x 16 x i32> @test_sf_vc_v_fvw_e16m4(<vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl) {
2049 ; CHECK-LABEL: test_sf_vc_v_fvw_e16m4:
2050 ; CHECK: # %bb.0: # %entry
2051 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
2052 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v16, fa0
2055 %0 = tail call <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen 1, <vscale x 16 x i32> %vd, <vscale x 16 x i16> %vs2, half %fs1, iXLen %vl)
2056 ret <vscale x 16 x i32> %0
2059 declare <vscale x 16 x i32> @llvm.riscv.sf.vc.v.fvw.nxv16i32.iXLen.nxv16i16.f16.iXLen(iXLen, <vscale x 16 x i32>, <vscale x 16 x i16>, half, iXLen)
2061 define <vscale x 1 x i64> @test_sf_vc_v_fvw_e32mf2(<vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl) {
2062 ; CHECK-LABEL: test_sf_vc_v_fvw_e32mf2:
2063 ; CHECK: # %bb.0: # %entry
2064 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
2065 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
2068 %0 = tail call <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen 1, <vscale x 1 x i64> %vd, <vscale x 1 x i32> %vs2, float %fs1, iXLen %vl)
2069 ret <vscale x 1 x i64> %0
2072 declare <vscale x 1 x i64> @llvm.riscv.sf.vc.v.fvw.nxv1i64.iXLen.nxv1i32.f32.iXLen(iXLen, <vscale x 1 x i64>, <vscale x 1 x i32>, float, iXLen)
2074 define <vscale x 2 x i64> @test_sf_vc_v_fvw_e32m1(<vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl) {
2075 ; CHECK-LABEL: test_sf_vc_v_fvw_e32m1:
2076 ; CHECK: # %bb.0: # %entry
2077 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
2078 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0
2081 %0 = tail call <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen 1, <vscale x 2 x i64> %vd, <vscale x 2 x i32> %vs2, float %fs1, iXLen %vl)
2082 ret <vscale x 2 x i64> %0
2085 declare <vscale x 2 x i64> @llvm.riscv.sf.vc.v.fvw.nxv2i64.iXLen.nxv2i32.f32.iXLen(iXLen, <vscale x 2 x i64>, <vscale x 2 x i32>, float, iXLen)
2087 define <vscale x 4 x i64> @test_sf_vc_v_fvw_e32m2(<vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl) {
2088 ; CHECK-LABEL: test_sf_vc_v_fvw_e32m2:
2089 ; CHECK: # %bb.0: # %entry
2090 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
2091 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0
2094 %0 = tail call <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen 1, <vscale x 4 x i64> %vd, <vscale x 4 x i32> %vs2, float %fs1, iXLen %vl)
2095 ret <vscale x 4 x i64> %0
2098 declare <vscale x 4 x i64> @llvm.riscv.sf.vc.v.fvw.nxv4i64.iXLen.nxv4i32.f32.iXLen(iXLen, <vscale x 4 x i64>, <vscale x 4 x i32>, float, iXLen)
2100 define <vscale x 8 x i64> @test_sf_vc_v_fvw_e32m4(<vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl) {
2101 ; CHECK-LABEL: test_sf_vc_v_fvw_e32m4:
2102 ; CHECK: # %bb.0: # %entry
2103 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
2104 ; CHECK-NEXT: sf.vc.v.fvw 1, v8, v16, fa0
2107 %0 = tail call <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen 1, <vscale x 8 x i64> %vd, <vscale x 8 x i32> %vs2, float %fs1, iXLen %vl)
2108 ret <vscale x 8 x i64> %0
2111 declare <vscale x 8 x i64> @llvm.riscv.sf.vc.v.fvw.nxv8i64.iXLen.nxv8i32.f32.iXLen(iXLen, <vscale x 8 x i64>, <vscale x 8 x i32>, float, iXLen)
2113 define void @test_f_sf_vc_vvw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
2114 ; CHECK-LABEL: test_f_sf_vc_vvw_se_e16mf4:
2115 ; CHECK: # %bb.0: # %entry
2116 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
2117 ; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
2120 tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f32.nxv1f16.nxv1f16.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
2124 declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f32.nxv1f16.nxv1f16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
2126 define void @test_f_sf_vc_vvw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
2127 ; CHECK-LABEL: test_f_sf_vc_vvw_se_e16mf2:
2128 ; CHECK: # %bb.0: # %entry
2129 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
2130 ; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
2133 tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f32.nxv2f16.nxv2f16.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
2137 declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f32.nxv2f16.nxv2f16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
2139 define void @test_f_sf_vc_vvw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
2140 ; CHECK-LABEL: test_f_sf_vc_vvw_se_e16m1:
2141 ; CHECK: # %bb.0: # %entry
2142 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
2143 ; CHECK-NEXT: sf.vc.vvw 3, v8, v10, v11
2146 tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f32.nxv4f16.nxv4f16.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
2150 declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f32.nxv4f16.nxv4f16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
2152 define void @test_f_sf_vc_vvw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
2153 ; CHECK-LABEL: test_f_sf_vc_vvw_se_e16m2:
2154 ; CHECK: # %bb.0: # %entry
2155 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
2156 ; CHECK-NEXT: sf.vc.vvw 3, v8, v12, v14
2159 tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f32.nxv8f16.nxv8f16.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
2163 declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f32.nxv8f16.nxv8f16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
2165 define void @test_f_sf_vc_vvw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
2166 ; CHECK-LABEL: test_f_sf_vc_vvw_se_e16m4:
2167 ; CHECK: # %bb.0: # %entry
2168 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
2169 ; CHECK-NEXT: sf.vc.vvw 3, v8, v16, v20
2172 tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16f32.nxv16f16.nxv16f16.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
2176 declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv16f32.nxv16f16.nxv16f16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
2178 define void @test_f_sf_vc_vvw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
2179 ; CHECK-LABEL: test_f_sf_vc_vvw_se_e32mf2:
2180 ; CHECK: # %bb.0: # %entry
2181 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
2182 ; CHECK-NEXT: sf.vc.vvw 3, v8, v9, v10
2185 tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f64.nxv1f32.nxv1f32.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
2189 declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv1f64.nxv1f32.nxv1f32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
2191 define void @test_f_sf_vc_vvw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
2192 ; CHECK-LABEL: test_f_sf_vc_vvw_se_e32m1:
2193 ; CHECK: # %bb.0: # %entry
2194 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
2195 ; CHECK-NEXT: sf.vc.vvw 3, v8, v10, v11
2198 tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f64.nxv2f32.nxv2f32.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
2202 declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv2f64.nxv2f32.nxv2f32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
2204 define void @test_f_sf_vc_vvw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
2205 ; CHECK-LABEL: test_f_sf_vc_vvw_se_e32m2:
2206 ; CHECK: # %bb.0: # %entry
2207 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
2208 ; CHECK-NEXT: sf.vc.vvw 3, v8, v12, v14
2211 tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f64.nxv4f32.nxv4f32.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
2215 declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv4f64.nxv4f32.nxv4f32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
2217 define void @test_f_sf_vc_vvw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
2218 ; CHECK-LABEL: test_f_sf_vc_vvw_se_e32m4:
2219 ; CHECK: # %bb.0: # %entry
2220 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
2221 ; CHECK-NEXT: sf.vc.vvw 3, v8, v16, v20
2224 tail call void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f64.nxv8f32.nxv8f32.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
2228 declare void @llvm.riscv.sf.vc.vvw.se.iXLen.nxv8f64.nxv8f32.nxv8f32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
2230 define <vscale x 1 x float> @test_f_sf_vc_v_vvw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
2231 ; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e16mf4:
2232 ; CHECK: # %bb.0: # %entry
2233 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
2234 ; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
2237 %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv1f32.iXLen.nxv1f16.nxv1f16.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
2238 ret <vscale x 1 x float> %0
2241 declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv1f32.iXLen.nxv1f16.nxv1f16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
2243 define <vscale x 2 x float> @test_f_sf_vc_v_vvw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
2244 ; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e16mf2:
2245 ; CHECK: # %bb.0: # %entry
2246 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
2247 ; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
2250 %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv2f32.iXLen.nxv2f16.nxv2f16.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
2251 ret <vscale x 2 x float> %0
2254 declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv2f32.iXLen.nxv2f16.nxv2f16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
2256 define <vscale x 4 x float> @test_f_sf_vc_v_vvw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
2257 ; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e16m1:
2258 ; CHECK: # %bb.0: # %entry
2259 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
2260 ; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11
2263 %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv4f32.iXLen.nxv4f16.nxv4f16.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
2264 ret <vscale x 4 x float> %0
2267 declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv4f32.iXLen.nxv4f16.nxv4f16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
2269 define <vscale x 8 x float> @test_f_sf_vc_v_vvw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
2270 ; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e16m2:
2271 ; CHECK: # %bb.0: # %entry
2272 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
2273 ; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14
2276 %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv8f32.iXLen.nxv8f16.nxv8f16.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
2277 ret <vscale x 8 x float> %0
2280 declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv8f32.iXLen.nxv8f16.nxv8f16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
2282 define <vscale x 16 x float> @test_f_sf_vc_v_vvw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
2283 ; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e16m4:
2284 ; CHECK: # %bb.0: # %entry
2285 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
2286 ; CHECK-NEXT: sf.vc.v.vvw 3, v8, v16, v20
2289 %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv16f32.iXLen.nxv16f16.nxv16f16.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
2290 ret <vscale x 16 x float> %0
2293 declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvw.se.nxv16f32.iXLen.nxv16f16.nxv16f16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
2295 define <vscale x 1 x double> @test_f_sf_vc_v_vvw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
2296 ; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e32mf2:
2297 ; CHECK: # %bb.0: # %entry
2298 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
2299 ; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
2302 %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv1f64.iXLen.nxv1f32.nxv1f32.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
2303 ret <vscale x 1 x double> %0
2306 declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv1f64.iXLen.nxv1f32.nxv1f32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
2308 define <vscale x 2 x double> @test_f_sf_vc_v_vvw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
2309 ; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e32m1:
2310 ; CHECK: # %bb.0: # %entry
2311 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
2312 ; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11
2315 %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv2f64.iXLen.nxv2f32.nxv2f32.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
2316 ret <vscale x 2 x double> %0
2319 declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv2f64.iXLen.nxv2f32.nxv2f32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
2321 define <vscale x 4 x double> @test_f_sf_vc_v_vvw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
2322 ; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e32m2:
2323 ; CHECK: # %bb.0: # %entry
2324 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
2325 ; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14
2328 %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.iXLen.nxv4f32.nxv4f32.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
2329 ret <vscale x 4 x double> %0
2332 declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv4f64.iXLen.nxv4f32.nxv4f32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
2334 define <vscale x 8 x double> @test_f_sf_vc_v_vvw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
2335 ; CHECK-LABEL: test_f_sf_vc_v_vvw_se_e32m4:
2336 ; CHECK: # %bb.0: # %entry
2337 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
2338 ; CHECK-NEXT: sf.vc.v.vvw 3, v8, v16, v20
2341 %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv8f64.iXLen.nxv8f32.nxv8f32.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
2342 ret <vscale x 8 x double> %0
2345 declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvw.se.nxv8f64.iXLen.nxv8f32.nxv8f32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
2347 define <vscale x 1 x float> @test_f_sf_vc_v_vvw_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl) {
2348 ; CHECK-LABEL: test_f_sf_vc_v_vvw_e16mf4:
2349 ; CHECK: # %bb.0: # %entry
2350 ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
2351 ; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
2354 %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvw.nxv1f32.iXLen.nxv1f16.nxv1f16.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, <vscale x 1 x half> %vs1, iXLen %vl)
2355 ret <vscale x 1 x float> %0
2358 declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.vvw.nxv1f32.iXLen.nxv1f16.nxv1f16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, <vscale x 1 x half>, iXLen)
2360 define <vscale x 2 x float> @test_f_sf_vc_v_vvw_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl) {
2361 ; CHECK-LABEL: test_f_sf_vc_v_vvw_e16mf2:
2362 ; CHECK: # %bb.0: # %entry
2363 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
2364 ; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
2367 %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvw.nxv2f32.iXLen.nxv2f16.nxv2f16.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, <vscale x 2 x half> %vs1, iXLen %vl)
2368 ret <vscale x 2 x float> %0
2371 declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.vvw.nxv2f32.iXLen.nxv2f16.nxv2f16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, <vscale x 2 x half>, iXLen)
2373 define <vscale x 4 x float> @test_f_sf_vc_v_vvw_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl) {
2374 ; CHECK-LABEL: test_f_sf_vc_v_vvw_e16m1:
2375 ; CHECK: # %bb.0: # %entry
2376 ; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
2377 ; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11
2380 %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvw.nxv4f32.iXLen.nxv4f16.nxv4f16.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, <vscale x 4 x half> %vs1, iXLen %vl)
2381 ret <vscale x 4 x float> %0
2384 declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.vvw.nxv4f32.iXLen.nxv4f16.nxv4f16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, <vscale x 4 x half>, iXLen)
2386 define <vscale x 8 x float> @test_f_sf_vc_v_vvw_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl) {
2387 ; CHECK-LABEL: test_f_sf_vc_v_vvw_e16m2:
2388 ; CHECK: # %bb.0: # %entry
2389 ; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
2390 ; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14
2393 %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvw.nxv8f32.iXLen.nxv8f16.nxv8f16.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, <vscale x 8 x half> %vs1, iXLen %vl)
2394 ret <vscale x 8 x float> %0
2397 declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.vvw.nxv8f32.iXLen.nxv8f16.nxv8f16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, <vscale x 8 x half>, iXLen)
2399 define <vscale x 16 x float> @test_f_sf_vc_v_vvw_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl) {
2400 ; CHECK-LABEL: test_f_sf_vc_v_vvw_e16m4:
2401 ; CHECK: # %bb.0: # %entry
2402 ; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
2403 ; CHECK-NEXT: sf.vc.v.vvw 3, v8, v16, v20
2406 %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvw.nxv16f32.iXLen.nxv16f16.nxv16f16.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, <vscale x 16 x half> %vs1, iXLen %vl)
2407 ret <vscale x 16 x float> %0
2410 declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.vvw.nxv16f32.iXLen.nxv16f16.nxv16f16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, <vscale x 16 x half>, iXLen)
2412 define <vscale x 1 x double> @test_f_sf_vc_v_vvw_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl) {
2413 ; CHECK-LABEL: test_f_sf_vc_v_vvw_e32mf2:
2414 ; CHECK: # %bb.0: # %entry
2415 ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
2416 ; CHECK-NEXT: sf.vc.v.vvw 3, v8, v9, v10
2419 %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvw.nxv1f64.iXLen.nxv1f32.nxv1f32.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, <vscale x 1 x float> %vs1, iXLen %vl)
2420 ret <vscale x 1 x double> %0
2423 declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.vvw.nxv1f64.iXLen.nxv1f32.nxv1f32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, <vscale x 1 x float>, iXLen)
2425 define <vscale x 2 x double> @test_f_sf_vc_v_vvw_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl) {
2426 ; CHECK-LABEL: test_f_sf_vc_v_vvw_e32m1:
2427 ; CHECK: # %bb.0: # %entry
2428 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
2429 ; CHECK-NEXT: sf.vc.v.vvw 3, v8, v10, v11
2432 %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvw.nxv2f64.iXLen.nxv2f32.nxv2f32.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, <vscale x 2 x float> %vs1, iXLen %vl)
2433 ret <vscale x 2 x double> %0
2436 declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.vvw.nxv2f64.iXLen.nxv2f32.nxv2f32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, <vscale x 2 x float>, iXLen)
2438 define <vscale x 4 x double> @test_f_sf_vc_v_vvw_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl) {
2439 ; CHECK-LABEL: test_f_sf_vc_v_vvw_e32m2:
2440 ; CHECK: # %bb.0: # %entry
2441 ; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
2442 ; CHECK-NEXT: sf.vc.v.vvw 3, v8, v12, v14
2445 %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvw.nxv4f64.iXLen.nxv4f32.nxv4f32.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, <vscale x 4 x float> %vs1, iXLen %vl)
2446 ret <vscale x 4 x double> %0
2449 declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.vvw.nxv4f64.iXLen.nxv4f32.nxv4f32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, <vscale x 4 x float>, iXLen)
2451 define <vscale x 8 x double> @test_f_sf_vc_v_vvw_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl) {
2452 ; CHECK-LABEL: test_f_sf_vc_v_vvw_e32m4:
2453 ; CHECK: # %bb.0: # %entry
2454 ; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
2455 ; CHECK-NEXT: sf.vc.v.vvw 3, v8, v16, v20
2458 %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvw.nxv8f64.iXLen.nxv8f32.nxv8f32.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, <vscale x 8 x float> %vs1, iXLen %vl)
2459 ret <vscale x 8 x double> %0
2462 declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.vvw.nxv8f64.iXLen.nxv8f32.nxv8f32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, <vscale x 8 x float>, iXLen)
2464 define void @test_f_sf_vc_xvw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
2465 ; CHECK-LABEL: test_f_sf_vc_xvw_se_e16mf4:
2466 ; CHECK: # %bb.0: # %entry
2467 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2468 ; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
2471 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f32.nxv1f16.i16.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
2475 declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f32.nxv1f16.i16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, i16, iXLen)
2477 define void @test_f_sf_vc_xvw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
2478 ; CHECK-LABEL: test_f_sf_vc_xvw_se_e16mf2:
2479 ; CHECK: # %bb.0: # %entry
2480 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2481 ; CHECK-NEXT: sf.vc.xvw 3, v8, v9, a0
2484 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f32.nxv2f16.i16.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
2488 declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f32.nxv2f16.i16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, i16, iXLen)
2490 define void @test_f_sf_vc_xvw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
2491 ; CHECK-LABEL: test_f_sf_vc_xvw_se_e16m1:
2492 ; CHECK: # %bb.0: # %entry
2493 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
2494 ; CHECK-NEXT: sf.vc.xvw 3, v8, v10, a0
2497 tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f32.nxv4f16.i16.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
2501 declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f32.nxv4f16.i16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, i16, iXLen)
define void @test_f_sf_vc_xvw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_xvw_se_e16m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v12, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f32.nxv8f16.i16.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f32.nxv8f16.i16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, i16, iXLen)

define void @test_f_sf_vc_xvw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_xvw_se_e16m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v16, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16f32.nxv16f16.i16.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv16f32.nxv16f16.i16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, i16, iXLen)

define void @test_f_sf_vc_xvw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_xvw_se_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v9, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f64.nxv1f32.i32.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv1f64.nxv1f32.i32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, i32, iXLen)

define void @test_f_sf_vc_xvw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_xvw_se_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v10, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f64.nxv2f32.i32.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv2f64.nxv2f32.i32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, i32, iXLen)

define void @test_f_sf_vc_xvw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_xvw_se_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v12, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f64.nxv4f32.i32.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv4f64.nxv4f32.i32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, i32, iXLen)

define void @test_f_sf_vc_xvw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_xvw_se_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT:    sf.vc.xvw 3, v8, v16, a0
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f64.nxv8f32.i32.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.xvw.se.iXLen.nxv8f64.nxv8f32.i32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, i32, iXLen)
define <vscale x 1 x float> @test_f_sf_vc_v_xvw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv1f32.iXLen.nxv1f16.i16.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
  ret <vscale x 1 x float> %0
}

declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv1f32.iXLen.nxv1f16.i16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, i16, iXLen)

define <vscale x 2 x float> @test_f_sf_vc_v_xvw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e16mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv2f32.iXLen.nxv2f16.i16.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
  ret <vscale x 2 x float> %0
}

declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv2f32.iXLen.nxv2f16.i16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, i16, iXLen)

define <vscale x 4 x float> @test_f_sf_vc_v_xvw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e16m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv4f32.iXLen.nxv4f16.i16.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
  ret <vscale x 4 x float> %0
}

declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv4f32.iXLen.nxv4f16.i16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, i16, iXLen)

define <vscale x 8 x float> @test_f_sf_vc_v_xvw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e16m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv8f32.iXLen.nxv8f16.i16.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
  ret <vscale x 8 x float> %0
}

declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv8f32.iXLen.nxv8f16.i16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, i16, iXLen)

define <vscale x 16 x float> @test_f_sf_vc_v_xvw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e16m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv16f32.iXLen.nxv16f16.i16.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
  ret <vscale x 16 x float> %0
}

declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvw.se.nxv16f32.iXLen.nxv16f16.i16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, i16, iXLen)

define <vscale x 1 x double> @test_f_sf_vc_v_xvw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv1f64.i32.nxv1f32.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
  ret <vscale x 1 x double> %0
}

declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv1f64.i32.nxv1f32.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, i32, iXLen)

define <vscale x 2 x double> @test_f_sf_vc_v_xvw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv2f64.i32.nxv2f32.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
  ret <vscale x 2 x double> %0
}

declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv2f64.i32.nxv2f32.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, i32, iXLen)

define <vscale x 4 x double> @test_f_sf_vc_v_xvw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.i32.nxv4f32.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
  ret <vscale x 4 x double> %0
}

declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv4f64.i32.nxv4f32.iXLen.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, i32, iXLen)

define <vscale x 8 x double> @test_f_sf_vc_v_xvw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_xvw_se_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv8f64.i32.nxv8f32.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
  ret <vscale x 8 x double> %0
}

declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.xvw.se.nxv8f64.i32.nxv8f32.iXLen.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, i32, iXLen)
define <vscale x 1 x float> @test_f_sf_vc_v_xvw_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_xvw_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvw.nxv1f32.iXLen.nxv1f16.i16.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, i16 %rs1, iXLen %vl)
  ret <vscale x 1 x float> %0
}

declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.xvw.nxv1f32.iXLen.nxv1f16.i16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, i16, iXLen)

define <vscale x 2 x float> @test_f_sf_vc_v_xvw_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_xvw_e16mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvw.nxv2f32.iXLen.nxv2f16.i16.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, i16 %rs1, iXLen %vl)
  ret <vscale x 2 x float> %0
}

declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.xvw.nxv2f32.iXLen.nxv2f16.i16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, i16, iXLen)

define <vscale x 4 x float> @test_f_sf_vc_v_xvw_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_xvw_e16m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvw.nxv4f32.iXLen.nxv4f16.i16.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, i16 %rs1, iXLen %vl)
  ret <vscale x 4 x float> %0
}

declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.xvw.nxv4f32.iXLen.nxv4f16.i16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, i16, iXLen)

define <vscale x 8 x float> @test_f_sf_vc_v_xvw_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_xvw_e16m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvw.nxv8f32.iXLen.nxv8f16.i16.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, i16 %rs1, iXLen %vl)
  ret <vscale x 8 x float> %0
}

declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.xvw.nxv8f32.iXLen.nxv8f16.i16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, i16, iXLen)

define <vscale x 16 x float> @test_f_sf_vc_v_xvw_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, i16 zeroext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_xvw_e16m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvw.nxv16f32.iXLen.nxv16f16.i16.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, i16 %rs1, iXLen %vl)
  ret <vscale x 16 x float> %0
}

declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.xvw.nxv16f32.iXLen.nxv16f16.i16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, i16, iXLen)

define <vscale x 1 x double> @test_f_sf_vc_v_xvw_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, i32 signext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_xvw_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.xvw.nxv1f64.iXLen.nxv1f32.i32.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, i32 %rs1, iXLen %vl)
  ret <vscale x 1 x double> %0
}

declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.xvw.nxv1f64.iXLen.nxv1f32.i32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, i32, iXLen)

define <vscale x 2 x double> @test_f_sf_vc_v_xvw_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, i32 signext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_xvw_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v10, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.xvw.nxv2f64.iXLen.nxv2f32.i32.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, i32 %rs1, iXLen %vl)
  ret <vscale x 2 x double> %0
}

declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.xvw.nxv2f64.iXLen.nxv2f32.i32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, i32, iXLen)

define <vscale x 4 x double> @test_f_sf_vc_v_xvw_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, i32 signext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_xvw_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v12, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvw.nxv4f64.iXLen.nxv4f32.i32.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, i32 %rs1, iXLen %vl)
  ret <vscale x 4 x double> %0
}

declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.xvw.nxv4f64.iXLen.nxv4f32.i32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, i32, iXLen)

define <vscale x 8 x double> @test_f_sf_vc_v_xvw_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, i32 signext %rs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_xvw_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.xvw 3, v8, v16, a0
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.xvw.nxv8f64.iXLen.nxv8f32.i32.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, i32 %rs1, iXLen %vl)
  ret <vscale x 8 x double> %0
}

declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.xvw.nxv8f64.iXLen.nxv8f32.i32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, i32, iXLen)
define void @test_f_sf_vc_ivw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_ivw_se_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    sf.vc.ivw 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f32.nxv1f16.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f32.nxv1f16.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, iXLen, iXLen)

define void @test_f_sf_vc_ivw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_ivw_se_e16mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    sf.vc.ivw 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f32.nxv2f16.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f32.nxv2f16.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, iXLen, iXLen)

define void @test_f_sf_vc_ivw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_ivw_se_e16m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    sf.vc.ivw 3, v8, v10, 10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f32.nxv4f16.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f32.nxv4f16.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, iXLen, iXLen)

define void @test_f_sf_vc_ivw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_ivw_se_e16m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    sf.vc.ivw 3, v8, v12, 10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f32.nxv8f16.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f32.nxv8f16.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, iXLen, iXLen)

define void @test_f_sf_vc_ivw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_ivw_se_e16m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT:    sf.vc.ivw 3, v8, v16, 10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16f32.nxv16f16.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv16f32.nxv16f16.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, iXLen, iXLen)

define void @test_f_sf_vc_ivw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_ivw_se_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    sf.vc.ivw 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f64.nxv1f32.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv1f64.nxv1f32.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, iXLen, iXLen)

define void @test_f_sf_vc_ivw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_ivw_se_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    sf.vc.ivw 3, v8, v10, 10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f64.nxv2f32.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv2f64.nxv2f32.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, iXLen, iXLen)

define void @test_f_sf_vc_ivw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_ivw_se_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    sf.vc.ivw 3, v8, v12, 10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f64.nxv4f32.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv4f64.nxv4f32.iXLen.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, iXLen, iXLen)

define void @test_f_sf_vc_ivw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_ivw_se_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    sf.vc.ivw 3, v8, v16, 10
; CHECK-NEXT:    ret
entry:
  tail call void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f64.nxv8f32.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
  ret void
}

declare void @llvm.riscv.sf.vc.ivw.se.iXLen.nxv8f64.nxv8f32.iXLen.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, iXLen, iXLen)
define <vscale x 1 x float> @test_f_sf_vc_v_ivw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv1f32.iXLen.nxv1f16.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 1 x float> %0
}

declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv1f32.iXLen.nxv1f16.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, iXLen, iXLen)

define <vscale x 2 x float> @test_f_sf_vc_v_ivw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e16mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv2f32.iXLen.nxv2f16.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 2 x float> %0
}

declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv2f32.iXLen.nxv2f16.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, iXLen, iXLen)

define <vscale x 4 x float> @test_f_sf_vc_v_ivw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e16m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv4f32.iXLen.nxv4f16.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 4 x float> %0
}

declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv4f32.iXLen.nxv4f16.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, iXLen, iXLen)

define <vscale x 8 x float> @test_f_sf_vc_v_ivw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e16m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv8f32.iXLen.nxv8f16.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 8 x float> %0
}

declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv8f32.iXLen.nxv8f16.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, iXLen, iXLen)

define <vscale x 16 x float> @test_f_sf_vc_v_ivw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e16m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v16, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv16f32.iXLen.nxv16f16.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 16 x float> %0
}

declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivw.se.nxv16f32.iXLen.nxv16f16.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, iXLen, iXLen)

define <vscale x 1 x double> @test_f_sf_vc_v_ivw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv1f64.iXLen.nxv1f32.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 1 x double> %0
}

declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv1f64.iXLen.nxv1f32.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, iXLen, iXLen)

define <vscale x 2 x double> @test_f_sf_vc_v_ivw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv2f64.iXLen.nxv2f32.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 2 x double> %0
}

declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv2f64.iXLen.nxv2f32.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, iXLen, iXLen)

define <vscale x 4 x double> @test_f_sf_vc_v_ivw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv4f64.iXLen.nxv4f32.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 4 x double> %0
}

declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv4f64.iXLen.nxv4f32.iXLen.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, iXLen, iXLen)

define <vscale x 8 x double> @test_f_sf_vc_v_ivw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivw_se_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v16, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv8f64.iXLen.nxv8f32.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 8 x double> %0
}

declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivw.se.nxv8f64.iXLen.nxv8f32.iXLen.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, iXLen, iXLen)
define <vscale x 1 x float> @test_f_sf_vc_v_ivw_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivw_e16mf4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivw.nxv1f32.iXLen.nxv1f16.iXLen.iXLen(iXLen 3, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 1 x float> %0
}

declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.ivw.nxv1f32.iXLen.nxv1f16.iXLen.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, iXLen, iXLen)

define <vscale x 2 x float> @test_f_sf_vc_v_ivw_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivw_e16mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivw.nxv2f32.iXLen.nxv2f16.iXLen.iXLen(iXLen 3, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 2 x float> %0
}

declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.ivw.nxv2f32.iXLen.nxv2f16.iXLen.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, iXLen, iXLen)

define <vscale x 4 x float> @test_f_sf_vc_v_ivw_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivw_e16m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivw.nxv4f32.iXLen.nxv4f16.iXLen.iXLen(iXLen 3, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 4 x float> %0
}

declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.ivw.nxv4f32.iXLen.nxv4f16.iXLen.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, iXLen, iXLen)

define <vscale x 8 x float> @test_f_sf_vc_v_ivw_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivw_e16m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivw.nxv8f32.iXLen.nxv8f16.iXLen.iXLen(iXLen 3, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 8 x float> %0
}

declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.ivw.nxv8f32.iXLen.nxv8f16.iXLen.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, iXLen, iXLen)

define <vscale x 16 x float> @test_f_sf_vc_v_ivw_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivw_e16m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v16, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivw.nxv16f32.iXLen.nxv16f16.iXLen.iXLen(iXLen 3, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 16 x float> %0
}

declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.ivw.nxv16f32.iXLen.nxv16f16.iXLen.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, iXLen, iXLen)

define <vscale x 1 x double> @test_f_sf_vc_v_ivw_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivw_e32mf2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v9, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivw.nxv1f64.iXLen.nxv1f32.iXLen.iXLen(iXLen 3, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 1 x double> %0
}

declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.ivw.nxv1f64.iXLen.nxv1f32.iXLen.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, iXLen, iXLen)

define <vscale x 2 x double> @test_f_sf_vc_v_ivw_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivw_e32m1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v10, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivw.nxv2f64.iXLen.nxv2f32.iXLen.iXLen(iXLen 3, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 2 x double> %0
}

declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.ivw.nxv2f64.iXLen.nxv2f32.iXLen.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, iXLen, iXLen)

define <vscale x 4 x double> @test_f_sf_vc_v_ivw_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivw_e32m2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v12, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivw.nxv4f64.iXLen.nxv4f32.iXLen.iXLen(iXLen 3, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 4 x double> %0
}

declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.ivw.nxv4f64.iXLen.nxv4f32.iXLen.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, iXLen, iXLen)

define <vscale x 8 x double> @test_f_sf_vc_v_ivw_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_ivw_e32m4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT:    sf.vc.v.ivw 3, v8, v16, 10
; CHECK-NEXT:    ret
entry:
  %0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivw.nxv8f64.iXLen.nxv8f32.iXLen.iXLen(iXLen 3, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, iXLen 10, iXLen %vl)
  ret <vscale x 8 x double> %0
}

declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.ivw.nxv8f64.iXLen.nxv8f32.iXLen.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, iXLen, iXLen)
; Side-effect-only sf.vc.fvw at e16/mf4 (f16 source, f32-wide %vd): returns void,
; FP scalar rs1 in fa0; `ta` policy matches the other void .se tests in this file.
define void @test_f_sf_vc_fvw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvw_se_e16mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f32.nxv1f16.f16.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f32.nxv1f16.f16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, half, iXLen)
; Side-effect-only sf.vc.fvw at e16/mf2 (f16 source, f32-wide %vd): void result,
; FP scalar rs1 in fa0; `ta` policy as in the other void .se tests here.
define void @test_f_sf_vc_fvw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvw_se_e16mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f32.nxv2f16.f16.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f32.nxv2f16.f16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, half, iXLen)
; Side-effect-only sf.vc.fvw at e16/m1: the widened %vd occupies a 2-register group,
; so vs2 lands in v10 rather than v9; void result, `ta` policy.
define void @test_f_sf_vc_fvw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvw_se_e16m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: sf.vc.fvw 1, v8, v10, fa0
tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f32.nxv4f16.f16.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f32.nxv4f16.f16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, half, iXLen)
; Side-effect-only sf.vc.fvw at e16/m2: wide %vd in the v8..v11 group, vs2 in v12;
; void result, `ta` policy.
define void @test_f_sf_vc_fvw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvw_se_e16m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: sf.vc.fvw 1, v8, v12, fa0
tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f32.nxv8f16.f16.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f32.nxv8f16.f16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, half, iXLen)
; Side-effect-only sf.vc.fvw at e16/m4 (largest f16 case tested): wide %vd in
; v8..v15, vs2 in v16; void result, `ta` policy.
define void @test_f_sf_vc_fvw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvw_se_e16m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: sf.vc.fvw 1, v8, v16, fa0
tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16f32.nxv16f16.f16.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv16f32.nxv16f16.f16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, half, iXLen)
; Side-effect-only sf.vc.fvw at e32/mf2 (f32 source, f64-wide %vd): void result,
; float scalar rs1 in fa0, `ta` policy.
define void @test_f_sf_vc_fvw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvw_se_e32mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: sf.vc.fvw 1, v8, v9, fa0
tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f64.nxv1f32.f32.iXLen(iXLen 1, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv1f64.nxv1f32.f32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, float, iXLen)
; Side-effect-only sf.vc.fvw at e32/m1 (f32 -> f64 widening): vs2 in v10 past the
; 2-register wide %vd group; void result, `ta` policy.
define void @test_f_sf_vc_fvw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvw_se_e32m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: sf.vc.fvw 1, v8, v10, fa0
tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f64.nxv2f32.f32.iXLen(iXLen 1, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv2f64.nxv2f32.f32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, float, iXLen)
; Side-effect-only sf.vc.fvw at e32/m2 (f32 -> f64 widening): vs2 in v12;
; void result, `ta` policy.
define void @test_f_sf_vc_fvw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvw_se_e32m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: sf.vc.fvw 1, v8, v12, fa0
tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f64.nxv4f32.f32.iXLen(iXLen 1, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv4f64.nxv4f32.f32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, float, iXLen)
; Side-effect-only sf.vc.fvw at e32/m4 (largest f32 case tested): vs2 in v16;
; void result, `ta` policy.
define void @test_f_sf_vc_fvw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_fvw_se_e32m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: sf.vc.fvw 1, v8, v16, fa0
tail call void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f64.nxv8f32.f32.iXLen(iXLen 1, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
declare void @llvm.riscv.sf.vc.fvw.se.iXLen.nxv8f64.nxv8f32.f32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, float, iXLen)
; Value-returning sf.vc.v.fvw.se at e16/mf4: result has %vd's type (nxv1f32);
; `tu` policy matches the other value-returning tests in this file.
define <vscale x 1 x float> @test_f_sf_vc_v_fvw_se_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e16mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
%0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.iXLen.nxv1f16.f16.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
ret <vscale x 1 x float> %0
declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv1f32.iXLen.nxv1f16.f16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, half, iXLen)
; Value-returning sf.vc.v.fvw.se at e16/mf2: nxv2f32 result in v8, vs2 in v9;
; `tu` policy expected.
define <vscale x 2 x float> @test_f_sf_vc_v_fvw_se_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e16mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
%0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv2f32.iXLen.nxv2f16.f16.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
ret <vscale x 2 x float> %0
declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv2f32.iXLen.nxv2f16.f16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, half, iXLen)
; Value-returning sf.vc.v.fvw.se at e16/m1: wide nxv4f32 result in the v8 pair,
; vs2 in v10; `tu` policy expected.
define <vscale x 4 x float> @test_f_sf_vc_v_fvw_se_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e16m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0
%0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv4f32.iXLen.nxv4f16.f16.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
ret <vscale x 4 x float> %0
declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv4f32.iXLen.nxv4f16.f16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, half, iXLen)
; Value-returning sf.vc.v.fvw.se at e16/m2: nxv8f32 result in v8..v11, vs2 in v12;
; `tu` policy expected.
define <vscale x 8 x float> @test_f_sf_vc_v_fvw_se_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e16m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0
%0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv8f32.iXLen.nxv8f16.f16.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
ret <vscale x 8 x float> %0
declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv8f32.iXLen.nxv8f16.f16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, half, iXLen)
; Value-returning sf.vc.v.fvw.se at e16/m4 (largest f16 case tested): nxv16f32
; result in v8..v15, vs2 in v16; `tu` policy expected.
define <vscale x 16 x float> @test_f_sf_vc_v_fvw_se_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e16m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT: sf.vc.v.fvw 1, v8, v16, fa0
%0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv16f32.iXLen.nxv16f16.f16.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
ret <vscale x 16 x float> %0
declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvw.se.nxv16f32.iXLen.nxv16f16.f16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, half, iXLen)
; Value-returning sf.vc.v.fvw.se at e32/mf2 (f32 source, f64 result): nxv1f64
; result in v8, vs2 in v9; `tu` policy expected.
define <vscale x 1 x double> @test_f_sf_vc_v_fvw_se_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e32mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
%0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv1f64.iXLen.nxv1f32.f32.iXLen(iXLen 1, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
ret <vscale x 1 x double> %0
declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv1f64.iXLen.nxv1f32.f32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, float, iXLen)
; Value-returning sf.vc.v.fvw.se at e32/m1: nxv2f64 result in the v8 pair,
; vs2 in v10; `tu` policy expected.
define <vscale x 2 x double> @test_f_sf_vc_v_fvw_se_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e32m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0
%0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv2f64.iXLen.nxv2f32.f32.iXLen(iXLen 1, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
ret <vscale x 2 x double> %0
declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv2f64.iXLen.nxv2f32.f32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, float, iXLen)
; Value-returning sf.vc.v.fvw.se at e32/m2: nxv4f64 result in v8..v11, vs2 in v12;
; `tu` policy expected.
define <vscale x 4 x double> @test_f_sf_vc_v_fvw_se_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e32m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0
%0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.iXLen.nxv4f32.f32.iXLen(iXLen 1, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
ret <vscale x 4 x double> %0
declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv4f64.iXLen.nxv4f32.f32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, float, iXLen)
; Value-returning sf.vc.v.fvw.se at e32/m4 (largest f32 case tested): nxv8f64
; result in v8..v15, vs2 in v16; `tu` policy expected.
define <vscale x 8 x double> @test_f_sf_vc_v_fvw_se_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvw_se_e32m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT: sf.vc.v.fvw 1, v8, v16, fa0
%0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv8f64.iXLen.nxv8f32.f32.iXLen(iXLen 1, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
ret <vscale x 8 x double> %0
declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvw.se.nxv8f64.iXLen.nxv8f32.f32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, float, iXLen)
; Non-.se sf.vc.v.fvw at e16/mf4: same operands and expected codegen as the .se
; variant of this test elsewhere in the file, via the intrinsic without the .se suffix.
define <vscale x 1 x float> @test_f_sf_vc_v_fvw_e16mf4(<vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvw_e16mf4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
%0 = tail call <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvw.nxv1f32.iXLen.nxv1f16.f16.iXLen(iXLen 1, <vscale x 1 x float> %vd, <vscale x 1 x half> %vs2, half %fs1, iXLen %vl)
ret <vscale x 1 x float> %0
declare <vscale x 1 x float> @llvm.riscv.sf.vc.v.fvw.nxv1f32.iXLen.nxv1f16.f16.iXLen(iXLen, <vscale x 1 x float>, <vscale x 1 x half>, half, iXLen)
; Non-.se sf.vc.v.fvw at e16/mf2: nxv2f32 result in v8, vs2 in v9; `tu` policy.
define <vscale x 2 x float> @test_f_sf_vc_v_fvw_e16mf2(<vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvw_e16mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
%0 = tail call <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvw.nxv2f32.iXLen.nxv2f16.f16.iXLen(iXLen 1, <vscale x 2 x float> %vd, <vscale x 2 x half> %vs2, half %fs1, iXLen %vl)
ret <vscale x 2 x float> %0
declare <vscale x 2 x float> @llvm.riscv.sf.vc.v.fvw.nxv2f32.iXLen.nxv2f16.f16.iXLen(iXLen, <vscale x 2 x float>, <vscale x 2 x half>, half, iXLen)
; Non-.se sf.vc.v.fvw at e16/m1: nxv4f32 result in the v8 pair, vs2 in v10; `tu` policy.
define <vscale x 4 x float> @test_f_sf_vc_v_fvw_e16m1(<vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvw_e16m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0
%0 = tail call <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvw.nxv4f32.iXLen.nxv4f16.f16.iXLen(iXLen 1, <vscale x 4 x float> %vd, <vscale x 4 x half> %vs2, half %fs1, iXLen %vl)
ret <vscale x 4 x float> %0
declare <vscale x 4 x float> @llvm.riscv.sf.vc.v.fvw.nxv4f32.iXLen.nxv4f16.f16.iXLen(iXLen, <vscale x 4 x float>, <vscale x 4 x half>, half, iXLen)
; Non-.se sf.vc.v.fvw at e16/m2: nxv8f32 result in v8..v11, vs2 in v12; `tu` policy.
define <vscale x 8 x float> @test_f_sf_vc_v_fvw_e16m2(<vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvw_e16m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m2, tu, ma
; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0
%0 = tail call <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvw.nxv8f32.iXLen.nxv8f16.f16.iXLen(iXLen 1, <vscale x 8 x float> %vd, <vscale x 8 x half> %vs2, half %fs1, iXLen %vl)
ret <vscale x 8 x float> %0
declare <vscale x 8 x float> @llvm.riscv.sf.vc.v.fvw.nxv8f32.iXLen.nxv8f16.f16.iXLen(iXLen, <vscale x 8 x float>, <vscale x 8 x half>, half, iXLen)
; Non-.se sf.vc.v.fvw at e16/m4 (largest f16 case tested): nxv16f32 result in
; v8..v15, vs2 in v16; `tu` policy.
define <vscale x 16 x float> @test_f_sf_vc_v_fvw_e16m4(<vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvw_e16m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, ma
; CHECK-NEXT: sf.vc.v.fvw 1, v8, v16, fa0
%0 = tail call <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvw.nxv16f32.iXLen.nxv16f16.f16.iXLen(iXLen 1, <vscale x 16 x float> %vd, <vscale x 16 x half> %vs2, half %fs1, iXLen %vl)
ret <vscale x 16 x float> %0
declare <vscale x 16 x float> @llvm.riscv.sf.vc.v.fvw.nxv16f32.iXLen.nxv16f16.f16.iXLen(iXLen, <vscale x 16 x float>, <vscale x 16 x half>, half, iXLen)
; Non-.se sf.vc.v.fvw at e32/mf2 (f32 source, f64 result): nxv1f64 result in v8,
; vs2 in v9; `tu` policy.
define <vscale x 1 x double> @test_f_sf_vc_v_fvw_e32mf2(<vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvw_e32mf2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
; CHECK-NEXT: sf.vc.v.fvw 1, v8, v9, fa0
%0 = tail call <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvw.nxv1f64.iXLen.nxv1f32.f32.iXLen(iXLen 1, <vscale x 1 x double> %vd, <vscale x 1 x float> %vs2, float %fs1, iXLen %vl)
ret <vscale x 1 x double> %0
declare <vscale x 1 x double> @llvm.riscv.sf.vc.v.fvw.nxv1f64.iXLen.nxv1f32.f32.iXLen(iXLen, <vscale x 1 x double>, <vscale x 1 x float>, float, iXLen)
; Non-.se sf.vc.v.fvw at e32/m1: nxv2f64 result in the v8 pair, vs2 in v10; `tu` policy.
define <vscale x 2 x double> @test_f_sf_vc_v_fvw_e32m1(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvw_e32m1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT: sf.vc.v.fvw 1, v8, v10, fa0
%0 = tail call <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvw.nxv2f64.iXLen.nxv2f32.f32.iXLen(iXLen 1, <vscale x 2 x double> %vd, <vscale x 2 x float> %vs2, float %fs1, iXLen %vl)
ret <vscale x 2 x double> %0
declare <vscale x 2 x double> @llvm.riscv.sf.vc.v.fvw.nxv2f64.iXLen.nxv2f32.f32.iXLen(iXLen, <vscale x 2 x double>, <vscale x 2 x float>, float, iXLen)
; Non-.se sf.vc.v.fvw at e32/m2: nxv4f64 result in v8..v11, vs2 in v12; `tu` policy.
define <vscale x 4 x double> @test_f_sf_vc_v_fvw_e32m2(<vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvw_e32m2:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m2, tu, ma
; CHECK-NEXT: sf.vc.v.fvw 1, v8, v12, fa0
%0 = tail call <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvw.nxv4f64.iXLen.nxv4f32.f32.iXLen(iXLen 1, <vscale x 4 x double> %vd, <vscale x 4 x float> %vs2, float %fs1, iXLen %vl)
ret <vscale x 4 x double> %0
declare <vscale x 4 x double> @llvm.riscv.sf.vc.v.fvw.nxv4f64.iXLen.nxv4f32.f32.iXLen(iXLen, <vscale x 4 x double>, <vscale x 4 x float>, float, iXLen)
; Non-.se sf.vc.v.fvw at e32/m4 (largest f32 case tested): nxv8f64 result in
; v8..v15, vs2 in v16; `tu` policy.
define <vscale x 8 x double> @test_f_sf_vc_v_fvw_e32m4(<vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl) {
; CHECK-LABEL: test_f_sf_vc_v_fvw_e32m4:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e32, m4, tu, ma
; CHECK-NEXT: sf.vc.v.fvw 1, v8, v16, fa0
%0 = tail call <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvw.nxv8f64.iXLen.nxv8f32.f32.iXLen(iXLen 1, <vscale x 8 x double> %vd, <vscale x 8 x float> %vs2, float %fs1, iXLen %vl)
ret <vscale x 8 x double> %0
declare <vscale x 8 x double> @llvm.riscv.sf.vc.v.fvw.nxv8f64.iXLen.nxv8f32.f32.iXLen(iXLen, <vscale x 8 x double>, <vscale x 8 x float>, float, iXLen)