1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+zvfh \
3 ; RUN: -verify-machineinstrs < %s | FileCheck %s
5 declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr , i32)
6 declare {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, <vscale x 16 x i1>, i32, i32)
8 define <vscale x 16 x i16> @test_vlseg2ff_nxv16i16(ptr %base, i32 %vl, ptr %outvl) {
9 ; CHECK-LABEL: test_vlseg2ff_nxv16i16:
10 ; CHECK: # %bb.0: # %entry
11 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
12 ; CHECK-NEXT: vlseg2e16ff.v v4, (a0)
13 ; CHECK-NEXT: csrr a0, vl
14 ; CHECK-NEXT: sw a0, 0(a2)
17 %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i32 %vl)
18 %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} %0, 1
19 %2 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} %0, 2
20 store i32 %2, ptr %outvl
21 ret <vscale x 16 x i16> %1
24 define <vscale x 16 x i16> @test_vlseg2ff_mask_nxv16i16(<vscale x 16 x i16> %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
25 ; CHECK-LABEL: test_vlseg2ff_mask_nxv16i16:
26 ; CHECK: # %bb.0: # %entry
27 ; CHECK-NEXT: vmv4r.v v4, v8
28 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
29 ; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t
30 ; CHECK-NEXT: csrr a0, vl
31 ; CHECK-NEXT: sw a0, 0(a2)
34 %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
35 %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} %0, 1
36 %2 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} %0, 2
37 store i32 %2, ptr %outvl
38 ret <vscale x 16 x i16> %1
41 declare {<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg2ff.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr , i32)
42 declare {<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i1>, i32, i32)
44 define <vscale x 1 x i8> @test_vlseg2ff_nxv1i8(ptr %base, i32 %vl, ptr %outvl) {
45 ; CHECK-LABEL: test_vlseg2ff_nxv1i8:
46 ; CHECK: # %bb.0: # %entry
47 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
48 ; CHECK-NEXT: vlseg2e8ff.v v7, (a0)
49 ; CHECK-NEXT: csrr a0, vl
50 ; CHECK-NEXT: sw a0, 0(a2)
53 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg2ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
54 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 1
55 %2 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 2
56 store i32 %2, ptr %outvl
57 ret <vscale x 1 x i8> %1
60 define <vscale x 1 x i8> @test_vlseg2ff_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
61 ; CHECK-LABEL: test_vlseg2ff_mask_nxv1i8:
62 ; CHECK: # %bb.0: # %entry
63 ; CHECK-NEXT: vmv1r.v v7, v8
64 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
65 ; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t
66 ; CHECK-NEXT: csrr a0, vl
67 ; CHECK-NEXT: sw a0, 0(a2)
70 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
71 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 1
72 %2 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 2
73 store i32 %2, ptr %outvl
74 ret <vscale x 1 x i8> %1
77 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg3ff.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr , i32)
78 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i1>, i32, i32)
80 define <vscale x 1 x i8> @test_vlseg3ff_nxv1i8(ptr %base, i32 %vl, ptr %outvl) {
81 ; CHECK-LABEL: test_vlseg3ff_nxv1i8:
82 ; CHECK: # %bb.0: # %entry
83 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
84 ; CHECK-NEXT: vlseg3e8ff.v v7, (a0)
85 ; CHECK-NEXT: csrr a0, vl
86 ; CHECK-NEXT: sw a0, 0(a2)
89 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg3ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
90 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 1
91 %2 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 3
92 store i32 %2, ptr %outvl
93 ret <vscale x 1 x i8> %1
96 define <vscale x 1 x i8> @test_vlseg3ff_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
97 ; CHECK-LABEL: test_vlseg3ff_mask_nxv1i8:
98 ; CHECK: # %bb.0: # %entry
99 ; CHECK-NEXT: vmv1r.v v7, v8
100 ; CHECK-NEXT: vmv1r.v v9, v8
101 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
102 ; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t
103 ; CHECK-NEXT: csrr a0, vl
104 ; CHECK-NEXT: sw a0, 0(a2)
107 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
108 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 1
109 %2 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 3
110 store i32 %2, ptr %outvl
111 ret <vscale x 1 x i8> %1
114 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg4ff.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr , i32)
115 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i1>, i32, i32)
117 define <vscale x 1 x i8> @test_vlseg4ff_nxv1i8(ptr %base, i32 %vl, ptr %outvl) {
118 ; CHECK-LABEL: test_vlseg4ff_nxv1i8:
119 ; CHECK: # %bb.0: # %entry
120 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
121 ; CHECK-NEXT: vlseg4e8ff.v v7, (a0)
122 ; CHECK-NEXT: csrr a0, vl
123 ; CHECK-NEXT: sw a0, 0(a2)
126 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg4ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
127 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 1
128 %2 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 4
129 store i32 %2, ptr %outvl
130 ret <vscale x 1 x i8> %1
133 define <vscale x 1 x i8> @test_vlseg4ff_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
134 ; CHECK-LABEL: test_vlseg4ff_mask_nxv1i8:
135 ; CHECK: # %bb.0: # %entry
136 ; CHECK-NEXT: vmv1r.v v7, v8
137 ; CHECK-NEXT: vmv1r.v v9, v8
138 ; CHECK-NEXT: vmv1r.v v10, v8
139 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
140 ; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t
141 ; CHECK-NEXT: csrr a0, vl
142 ; CHECK-NEXT: sw a0, 0(a2)
145 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
146 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 1
147 %2 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 4
148 store i32 %2, ptr %outvl
149 ret <vscale x 1 x i8> %1
152 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg5ff.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr , i32)
153 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg5ff.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i1>, i32, i32)
155 define <vscale x 1 x i8> @test_vlseg5ff_nxv1i8(ptr %base, i32 %vl, ptr %outvl) {
156 ; CHECK-LABEL: test_vlseg5ff_nxv1i8:
157 ; CHECK: # %bb.0: # %entry
158 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
159 ; CHECK-NEXT: vlseg5e8ff.v v7, (a0)
160 ; CHECK-NEXT: csrr a0, vl
161 ; CHECK-NEXT: sw a0, 0(a2)
164 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg5ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
165 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 1
166 %2 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 5
167 store i32 %2, ptr %outvl
168 ret <vscale x 1 x i8> %1
171 define <vscale x 1 x i8> @test_vlseg5ff_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
172 ; CHECK-LABEL: test_vlseg5ff_mask_nxv1i8:
173 ; CHECK: # %bb.0: # %entry
174 ; CHECK-NEXT: vmv1r.v v7, v8
175 ; CHECK-NEXT: vmv1r.v v9, v8
176 ; CHECK-NEXT: vmv1r.v v10, v8
177 ; CHECK-NEXT: vmv1r.v v11, v8
178 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
179 ; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t
180 ; CHECK-NEXT: csrr a0, vl
181 ; CHECK-NEXT: sw a0, 0(a2)
184 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg5ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
185 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 1
186 %2 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 5
187 store i32 %2, ptr %outvl
188 ret <vscale x 1 x i8> %1
191 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg6ff.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr , i32)
192 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg6ff.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i1>, i32, i32)
194 define <vscale x 1 x i8> @test_vlseg6ff_nxv1i8(ptr %base, i32 %vl, ptr %outvl) {
195 ; CHECK-LABEL: test_vlseg6ff_nxv1i8:
196 ; CHECK: # %bb.0: # %entry
197 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
198 ; CHECK-NEXT: vlseg6e8ff.v v7, (a0)
199 ; CHECK-NEXT: csrr a0, vl
200 ; CHECK-NEXT: sw a0, 0(a2)
203 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg6ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
204 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 1
205 %2 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 6
206 store i32 %2, ptr %outvl
207 ret <vscale x 1 x i8> %1
210 define <vscale x 1 x i8> @test_vlseg6ff_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
211 ; CHECK-LABEL: test_vlseg6ff_mask_nxv1i8:
212 ; CHECK: # %bb.0: # %entry
213 ; CHECK-NEXT: vmv1r.v v7, v8
214 ; CHECK-NEXT: vmv1r.v v9, v8
215 ; CHECK-NEXT: vmv1r.v v10, v8
216 ; CHECK-NEXT: vmv1r.v v11, v8
217 ; CHECK-NEXT: vmv1r.v v12, v8
218 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
219 ; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t
220 ; CHECK-NEXT: csrr a0, vl
221 ; CHECK-NEXT: sw a0, 0(a2)
224 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg6ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
225 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 1
226 %2 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 6
227 store i32 %2, ptr %outvl
228 ret <vscale x 1 x i8> %1
231 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg7ff.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr , i32)
232 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg7ff.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i1>, i32, i32)
234 define <vscale x 1 x i8> @test_vlseg7ff_nxv1i8(ptr %base, i32 %vl, ptr %outvl) {
235 ; CHECK-LABEL: test_vlseg7ff_nxv1i8:
236 ; CHECK: # %bb.0: # %entry
237 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
238 ; CHECK-NEXT: vlseg7e8ff.v v7, (a0)
239 ; CHECK-NEXT: csrr a0, vl
240 ; CHECK-NEXT: sw a0, 0(a2)
243 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg7ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
244 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 1
245 %2 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 7
246 store i32 %2, ptr %outvl
247 ret <vscale x 1 x i8> %1
250 define <vscale x 1 x i8> @test_vlseg7ff_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
251 ; CHECK-LABEL: test_vlseg7ff_mask_nxv1i8:
252 ; CHECK: # %bb.0: # %entry
253 ; CHECK-NEXT: vmv1r.v v7, v8
254 ; CHECK-NEXT: vmv1r.v v9, v8
255 ; CHECK-NEXT: vmv1r.v v10, v8
256 ; CHECK-NEXT: vmv1r.v v11, v8
257 ; CHECK-NEXT: vmv1r.v v12, v8
258 ; CHECK-NEXT: vmv1r.v v13, v8
259 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
260 ; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t
261 ; CHECK-NEXT: csrr a0, vl
262 ; CHECK-NEXT: sw a0, 0(a2)
265 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg7ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
266 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 1
267 %2 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 7
268 store i32 %2, ptr %outvl
269 ret <vscale x 1 x i8> %1
272 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg8ff.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr , i32)
273 declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg8ff.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, <vscale x 1 x i1>, i32, i32)
275 define <vscale x 1 x i8> @test_vlseg8ff_nxv1i8(ptr %base, i32 %vl, ptr %outvl) {
276 ; CHECK-LABEL: test_vlseg8ff_nxv1i8:
277 ; CHECK: # %bb.0: # %entry
278 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
279 ; CHECK-NEXT: vlseg8e8ff.v v7, (a0)
280 ; CHECK-NEXT: csrr a0, vl
281 ; CHECK-NEXT: sw a0, 0(a2)
284 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg8ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
285 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 1
286 %2 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 8
287 store i32 %2, ptr %outvl
288 ret <vscale x 1 x i8> %1
291 define <vscale x 1 x i8> @test_vlseg8ff_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
292 ; CHECK-LABEL: test_vlseg8ff_mask_nxv1i8:
293 ; CHECK: # %bb.0: # %entry
294 ; CHECK-NEXT: vmv1r.v v7, v8
295 ; CHECK-NEXT: vmv1r.v v9, v8
296 ; CHECK-NEXT: vmv1r.v v10, v8
297 ; CHECK-NEXT: vmv1r.v v11, v8
298 ; CHECK-NEXT: vmv1r.v v12, v8
299 ; CHECK-NEXT: vmv1r.v v13, v8
300 ; CHECK-NEXT: vmv1r.v v14, v8
301 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
302 ; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t
303 ; CHECK-NEXT: csrr a0, vl
304 ; CHECK-NEXT: sw a0, 0(a2)
307 %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg8ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
308 %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 1
309 %2 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} %0, 8
310 store i32 %2, ptr %outvl
311 ret <vscale x 1 x i8> %1
314 declare {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, ptr , i32)
315 declare {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i1>, i32, i32)
317 define <vscale x 16 x i8> @test_vlseg2ff_nxv16i8(ptr %base, i32 %vl, ptr %outvl) {
318 ; CHECK-LABEL: test_vlseg2ff_nxv16i8:
319 ; CHECK: # %bb.0: # %entry
320 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
321 ; CHECK-NEXT: vlseg2e8ff.v v6, (a0)
322 ; CHECK-NEXT: csrr a0, vl
323 ; CHECK-NEXT: sw a0, 0(a2)
326 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
327 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 1
328 %2 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 2
329 store i32 %2, ptr %outvl
330 ret <vscale x 16 x i8> %1
333 define <vscale x 16 x i8> @test_vlseg2ff_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
334 ; CHECK-LABEL: test_vlseg2ff_mask_nxv16i8:
335 ; CHECK: # %bb.0: # %entry
336 ; CHECK-NEXT: vmv2r.v v6, v8
337 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
338 ; CHECK-NEXT: vlseg2e8ff.v v6, (a0), v0.t
339 ; CHECK-NEXT: csrr a0, vl
340 ; CHECK-NEXT: sw a0, 0(a2)
343 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
344 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 1
345 %2 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 2
346 store i32 %2, ptr %outvl
347 ret <vscale x 16 x i8> %1
350 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg3ff.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr , i32)
351 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i1>, i32, i32)
353 define <vscale x 16 x i8> @test_vlseg3ff_nxv16i8(ptr %base, i32 %vl, ptr %outvl) {
354 ; CHECK-LABEL: test_vlseg3ff_nxv16i8:
355 ; CHECK: # %bb.0: # %entry
356 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
357 ; CHECK-NEXT: vlseg3e8ff.v v6, (a0)
358 ; CHECK-NEXT: csrr a0, vl
359 ; CHECK-NEXT: sw a0, 0(a2)
362 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg3ff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
363 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 1
364 %2 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 3
365 store i32 %2, ptr %outvl
366 ret <vscale x 16 x i8> %1
369 define <vscale x 16 x i8> @test_vlseg3ff_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
370 ; CHECK-LABEL: test_vlseg3ff_mask_nxv16i8:
371 ; CHECK: # %bb.0: # %entry
372 ; CHECK-NEXT: vmv2r.v v6, v8
373 ; CHECK-NEXT: vmv2r.v v10, v8
374 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
375 ; CHECK-NEXT: vlseg3e8ff.v v6, (a0), v0.t
376 ; CHECK-NEXT: csrr a0, vl
377 ; CHECK-NEXT: sw a0, 0(a2)
380 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
381 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 1
382 %2 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 3
383 store i32 %2, ptr %outvl
384 ret <vscale x 16 x i8> %1
387 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg4ff.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr , i32)
388 declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, <vscale x 16 x i1>, i32, i32)
390 define <vscale x 16 x i8> @test_vlseg4ff_nxv16i8(ptr %base, i32 %vl, ptr %outvl) {
391 ; CHECK-LABEL: test_vlseg4ff_nxv16i8:
392 ; CHECK: # %bb.0: # %entry
393 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
394 ; CHECK-NEXT: vlseg4e8ff.v v6, (a0)
395 ; CHECK-NEXT: csrr a0, vl
396 ; CHECK-NEXT: sw a0, 0(a2)
399 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg4ff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
400 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 1
401 %2 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 4
402 store i32 %2, ptr %outvl
403 ret <vscale x 16 x i8> %1
406 define <vscale x 16 x i8> @test_vlseg4ff_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
407 ; CHECK-LABEL: test_vlseg4ff_mask_nxv16i8:
408 ; CHECK: # %bb.0: # %entry
409 ; CHECK-NEXT: vmv2r.v v6, v8
410 ; CHECK-NEXT: vmv2r.v v10, v8
411 ; CHECK-NEXT: vmv2r.v v12, v8
412 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
413 ; CHECK-NEXT: vlseg4e8ff.v v6, (a0), v0.t
414 ; CHECK-NEXT: csrr a0, vl
415 ; CHECK-NEXT: sw a0, 0(a2)
418 %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
419 %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 1
420 %2 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} %0, 4
421 store i32 %2, ptr %outvl
422 ret <vscale x 16 x i8> %1
425 declare {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr , i32)
426 declare {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i32, i32)
428 define <vscale x 2 x i32> @test_vlseg2ff_nxv2i32(ptr %base, i32 %vl, ptr %outvl) {
429 ; CHECK-LABEL: test_vlseg2ff_nxv2i32:
430 ; CHECK: # %bb.0: # %entry
431 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
432 ; CHECK-NEXT: vlseg2e32ff.v v7, (a0)
433 ; CHECK-NEXT: csrr a0, vl
434 ; CHECK-NEXT: sw a0, 0(a2)
437 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
438 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 1
439 %2 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 2
440 store i32 %2, ptr %outvl
441 ret <vscale x 2 x i32> %1
444 define <vscale x 2 x i32> @test_vlseg2ff_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
445 ; CHECK-LABEL: test_vlseg2ff_mask_nxv2i32:
446 ; CHECK: # %bb.0: # %entry
447 ; CHECK-NEXT: vmv1r.v v7, v8
448 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
449 ; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t
450 ; CHECK-NEXT: csrr a0, vl
451 ; CHECK-NEXT: sw a0, 0(a2)
454 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
455 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 1
456 %2 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 2
457 store i32 %2, ptr %outvl
458 ret <vscale x 2 x i32> %1
461 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg3ff.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr , i32)
462 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg3ff.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i32, i32)
464 define <vscale x 2 x i32> @test_vlseg3ff_nxv2i32(ptr %base, i32 %vl, ptr %outvl) {
465 ; CHECK-LABEL: test_vlseg3ff_nxv2i32:
466 ; CHECK: # %bb.0: # %entry
467 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
468 ; CHECK-NEXT: vlseg3e32ff.v v7, (a0)
469 ; CHECK-NEXT: csrr a0, vl
470 ; CHECK-NEXT: sw a0, 0(a2)
473 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg3ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
474 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 1
475 %2 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 3
476 store i32 %2, ptr %outvl
477 ret <vscale x 2 x i32> %1
480 define <vscale x 2 x i32> @test_vlseg3ff_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
481 ; CHECK-LABEL: test_vlseg3ff_mask_nxv2i32:
482 ; CHECK: # %bb.0: # %entry
483 ; CHECK-NEXT: vmv1r.v v7, v8
484 ; CHECK-NEXT: vmv1r.v v9, v8
485 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
486 ; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t
487 ; CHECK-NEXT: csrr a0, vl
488 ; CHECK-NEXT: sw a0, 0(a2)
491 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg3ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
492 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 1
493 %2 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 3
494 store i32 %2, ptr %outvl
495 ret <vscale x 2 x i32> %1
498 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg4ff.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr , i32)
499 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg4ff.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i32, i32)
501 define <vscale x 2 x i32> @test_vlseg4ff_nxv2i32(ptr %base, i32 %vl, ptr %outvl) {
502 ; CHECK-LABEL: test_vlseg4ff_nxv2i32:
503 ; CHECK: # %bb.0: # %entry
504 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
505 ; CHECK-NEXT: vlseg4e32ff.v v7, (a0)
506 ; CHECK-NEXT: csrr a0, vl
507 ; CHECK-NEXT: sw a0, 0(a2)
510 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg4ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
511 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 1
512 %2 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 4
513 store i32 %2, ptr %outvl
514 ret <vscale x 2 x i32> %1
517 define <vscale x 2 x i32> @test_vlseg4ff_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
518 ; CHECK-LABEL: test_vlseg4ff_mask_nxv2i32:
519 ; CHECK: # %bb.0: # %entry
520 ; CHECK-NEXT: vmv1r.v v7, v8
521 ; CHECK-NEXT: vmv1r.v v9, v8
522 ; CHECK-NEXT: vmv1r.v v10, v8
523 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
524 ; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t
525 ; CHECK-NEXT: csrr a0, vl
526 ; CHECK-NEXT: sw a0, 0(a2)
529 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg4ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
530 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 1
531 %2 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 4
532 store i32 %2, ptr %outvl
533 ret <vscale x 2 x i32> %1
536 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg5ff.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr , i32)
537 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg5ff.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i32, i32)
539 define <vscale x 2 x i32> @test_vlseg5ff_nxv2i32(ptr %base, i32 %vl, ptr %outvl) {
540 ; CHECK-LABEL: test_vlseg5ff_nxv2i32:
541 ; CHECK: # %bb.0: # %entry
542 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
543 ; CHECK-NEXT: vlseg5e32ff.v v7, (a0)
544 ; CHECK-NEXT: csrr a0, vl
545 ; CHECK-NEXT: sw a0, 0(a2)
548 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg5ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
549 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 1
550 %2 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 5
551 store i32 %2, ptr %outvl
552 ret <vscale x 2 x i32> %1
555 define <vscale x 2 x i32> @test_vlseg5ff_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
556 ; CHECK-LABEL: test_vlseg5ff_mask_nxv2i32:
557 ; CHECK: # %bb.0: # %entry
558 ; CHECK-NEXT: vmv1r.v v7, v8
559 ; CHECK-NEXT: vmv1r.v v9, v8
560 ; CHECK-NEXT: vmv1r.v v10, v8
561 ; CHECK-NEXT: vmv1r.v v11, v8
562 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
563 ; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t
564 ; CHECK-NEXT: csrr a0, vl
565 ; CHECK-NEXT: sw a0, 0(a2)
568 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg5ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
569 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 1
570 %2 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 5
571 store i32 %2, ptr %outvl
572 ret <vscale x 2 x i32> %1
575 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg6ff.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr , i32)
576 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg6ff.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i32, i32)
578 define <vscale x 2 x i32> @test_vlseg6ff_nxv2i32(ptr %base, i32 %vl, ptr %outvl) {
579 ; CHECK-LABEL: test_vlseg6ff_nxv2i32:
580 ; CHECK: # %bb.0: # %entry
581 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
582 ; CHECK-NEXT: vlseg6e32ff.v v7, (a0)
583 ; CHECK-NEXT: csrr a0, vl
584 ; CHECK-NEXT: sw a0, 0(a2)
587 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg6ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
588 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 1
589 %2 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 6
590 store i32 %2, ptr %outvl
591 ret <vscale x 2 x i32> %1
594 define <vscale x 2 x i32> @test_vlseg6ff_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
595 ; CHECK-LABEL: test_vlseg6ff_mask_nxv2i32:
596 ; CHECK: # %bb.0: # %entry
597 ; CHECK-NEXT: vmv1r.v v7, v8
598 ; CHECK-NEXT: vmv1r.v v9, v8
599 ; CHECK-NEXT: vmv1r.v v10, v8
600 ; CHECK-NEXT: vmv1r.v v11, v8
601 ; CHECK-NEXT: vmv1r.v v12, v8
602 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
603 ; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t
604 ; CHECK-NEXT: csrr a0, vl
605 ; CHECK-NEXT: sw a0, 0(a2)
608 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg6ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
609 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 1
610 %2 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 6
611 store i32 %2, ptr %outvl
612 ret <vscale x 2 x i32> %1
615 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg7ff.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr , i32)
616 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg7ff.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i32, i32)
618 define <vscale x 2 x i32> @test_vlseg7ff_nxv2i32(ptr %base, i32 %vl, ptr %outvl) {
619 ; CHECK-LABEL: test_vlseg7ff_nxv2i32:
620 ; CHECK: # %bb.0: # %entry
621 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
622 ; CHECK-NEXT: vlseg7e32ff.v v7, (a0)
623 ; CHECK-NEXT: csrr a0, vl
624 ; CHECK-NEXT: sw a0, 0(a2)
627 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg7ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
628 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 1
629 %2 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 7
630 store i32 %2, ptr %outvl
631 ret <vscale x 2 x i32> %1
634 define <vscale x 2 x i32> @test_vlseg7ff_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
635 ; CHECK-LABEL: test_vlseg7ff_mask_nxv2i32:
636 ; CHECK: # %bb.0: # %entry
637 ; CHECK-NEXT: vmv1r.v v7, v8
638 ; CHECK-NEXT: vmv1r.v v9, v8
639 ; CHECK-NEXT: vmv1r.v v10, v8
640 ; CHECK-NEXT: vmv1r.v v11, v8
641 ; CHECK-NEXT: vmv1r.v v12, v8
642 ; CHECK-NEXT: vmv1r.v v13, v8
643 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
644 ; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t
645 ; CHECK-NEXT: csrr a0, vl
646 ; CHECK-NEXT: sw a0, 0(a2)
649 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg7ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
650 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 1
651 %2 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 7
652 store i32 %2, ptr %outvl
653 ret <vscale x 2 x i32> %1
656 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg8ff.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr , i32)
657 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg8ff.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, <vscale x 2 x i1>, i32, i32)
659 define <vscale x 2 x i32> @test_vlseg8ff_nxv2i32(ptr %base, i32 %vl, ptr %outvl) {
660 ; CHECK-LABEL: test_vlseg8ff_nxv2i32:
661 ; CHECK: # %bb.0: # %entry
662 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
663 ; CHECK-NEXT: vlseg8e32ff.v v7, (a0)
664 ; CHECK-NEXT: csrr a0, vl
665 ; CHECK-NEXT: sw a0, 0(a2)
668 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg8ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
669 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 1
670 %2 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 8
671 store i32 %2, ptr %outvl
672 ret <vscale x 2 x i32> %1
675 define <vscale x 2 x i32> @test_vlseg8ff_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
676 ; CHECK-LABEL: test_vlseg8ff_mask_nxv2i32:
677 ; CHECK: # %bb.0: # %entry
678 ; CHECK-NEXT: vmv1r.v v7, v8
679 ; CHECK-NEXT: vmv1r.v v9, v8
680 ; CHECK-NEXT: vmv1r.v v10, v8
681 ; CHECK-NEXT: vmv1r.v v11, v8
682 ; CHECK-NEXT: vmv1r.v v12, v8
683 ; CHECK-NEXT: vmv1r.v v13, v8
684 ; CHECK-NEXT: vmv1r.v v14, v8
685 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
686 ; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t
687 ; CHECK-NEXT: csrr a0, vl
688 ; CHECK-NEXT: sw a0, 0(a2)
691 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg8ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
692 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 1
693 %2 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} %0, 8
694 store i32 %2, ptr %outvl
695 ret <vscale x 2 x i32> %1
698 declare {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr , i32)
699 declare {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i32, i32)
701 define <vscale x 4 x i16> @test_vlseg2ff_nxv4i16(ptr %base, i32 %vl, ptr %outvl) {
702 ; CHECK-LABEL: test_vlseg2ff_nxv4i16:
703 ; CHECK: # %bb.0: # %entry
704 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
705 ; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
706 ; CHECK-NEXT: csrr a0, vl
707 ; CHECK-NEXT: sw a0, 0(a2)
710 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
711 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 1
712 %2 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 2
713 store i32 %2, ptr %outvl
714 ret <vscale x 4 x i16> %1
717 define <vscale x 4 x i16> @test_vlseg2ff_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
718 ; CHECK-LABEL: test_vlseg2ff_mask_nxv4i16:
719 ; CHECK: # %bb.0: # %entry
720 ; CHECK-NEXT: vmv1r.v v7, v8
721 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
722 ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
723 ; CHECK-NEXT: csrr a0, vl
724 ; CHECK-NEXT: sw a0, 0(a2)
727 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
728 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 1
729 %2 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 2
730 store i32 %2, ptr %outvl
731 ret <vscale x 4 x i16> %1
734 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg3ff.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr , i32)
735 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg3ff.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i32, i32)
737 define <vscale x 4 x i16> @test_vlseg3ff_nxv4i16(ptr %base, i32 %vl, ptr %outvl) {
738 ; CHECK-LABEL: test_vlseg3ff_nxv4i16:
739 ; CHECK: # %bb.0: # %entry
740 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
741 ; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
742 ; CHECK-NEXT: csrr a0, vl
743 ; CHECK-NEXT: sw a0, 0(a2)
746 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg3ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
747 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 1
748 %2 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 3
749 store i32 %2, ptr %outvl
750 ret <vscale x 4 x i16> %1
753 define <vscale x 4 x i16> @test_vlseg3ff_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
754 ; CHECK-LABEL: test_vlseg3ff_mask_nxv4i16:
755 ; CHECK: # %bb.0: # %entry
756 ; CHECK-NEXT: vmv1r.v v7, v8
757 ; CHECK-NEXT: vmv1r.v v9, v8
758 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
759 ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
760 ; CHECK-NEXT: csrr a0, vl
761 ; CHECK-NEXT: sw a0, 0(a2)
764 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg3ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
765 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 1
766 %2 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 3
767 store i32 %2, ptr %outvl
768 ret <vscale x 4 x i16> %1
771 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg4ff.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr , i32)
772 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg4ff.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i32, i32)
774 define <vscale x 4 x i16> @test_vlseg4ff_nxv4i16(ptr %base, i32 %vl, ptr %outvl) {
775 ; CHECK-LABEL: test_vlseg4ff_nxv4i16:
776 ; CHECK: # %bb.0: # %entry
777 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
778 ; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
779 ; CHECK-NEXT: csrr a0, vl
780 ; CHECK-NEXT: sw a0, 0(a2)
783 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg4ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
784 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 1
785 %2 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 4
786 store i32 %2, ptr %outvl
787 ret <vscale x 4 x i16> %1
790 define <vscale x 4 x i16> @test_vlseg4ff_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
791 ; CHECK-LABEL: test_vlseg4ff_mask_nxv4i16:
792 ; CHECK: # %bb.0: # %entry
793 ; CHECK-NEXT: vmv1r.v v7, v8
794 ; CHECK-NEXT: vmv1r.v v9, v8
795 ; CHECK-NEXT: vmv1r.v v10, v8
796 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
797 ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
798 ; CHECK-NEXT: csrr a0, vl
799 ; CHECK-NEXT: sw a0, 0(a2)
802 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg4ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
803 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 1
804 %2 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 4
805 store i32 %2, ptr %outvl
806 ret <vscale x 4 x i16> %1
809 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg5ff.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr , i32)
810 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg5ff.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i32, i32)
812 define <vscale x 4 x i16> @test_vlseg5ff_nxv4i16(ptr %base, i32 %vl, ptr %outvl) {
813 ; CHECK-LABEL: test_vlseg5ff_nxv4i16:
814 ; CHECK: # %bb.0: # %entry
815 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
816 ; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
817 ; CHECK-NEXT: csrr a0, vl
818 ; CHECK-NEXT: sw a0, 0(a2)
821 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg5ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
822 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 1
823 %2 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 5
824 store i32 %2, ptr %outvl
825 ret <vscale x 4 x i16> %1
828 define <vscale x 4 x i16> @test_vlseg5ff_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
829 ; CHECK-LABEL: test_vlseg5ff_mask_nxv4i16:
830 ; CHECK: # %bb.0: # %entry
831 ; CHECK-NEXT: vmv1r.v v7, v8
832 ; CHECK-NEXT: vmv1r.v v9, v8
833 ; CHECK-NEXT: vmv1r.v v10, v8
834 ; CHECK-NEXT: vmv1r.v v11, v8
835 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
836 ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
837 ; CHECK-NEXT: csrr a0, vl
838 ; CHECK-NEXT: sw a0, 0(a2)
841 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg5ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
842 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 1
843 %2 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 5
844 store i32 %2, ptr %outvl
845 ret <vscale x 4 x i16> %1
848 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg6ff.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr , i32)
849 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg6ff.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i32, i32)
851 define <vscale x 4 x i16> @test_vlseg6ff_nxv4i16(ptr %base, i32 %vl, ptr %outvl) {
852 ; CHECK-LABEL: test_vlseg6ff_nxv4i16:
853 ; CHECK: # %bb.0: # %entry
854 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
855 ; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
856 ; CHECK-NEXT: csrr a0, vl
857 ; CHECK-NEXT: sw a0, 0(a2)
860 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg6ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
861 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 1
862 %2 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 6
863 store i32 %2, ptr %outvl
864 ret <vscale x 4 x i16> %1
867 define <vscale x 4 x i16> @test_vlseg6ff_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
868 ; CHECK-LABEL: test_vlseg6ff_mask_nxv4i16:
869 ; CHECK: # %bb.0: # %entry
870 ; CHECK-NEXT: vmv1r.v v7, v8
871 ; CHECK-NEXT: vmv1r.v v9, v8
872 ; CHECK-NEXT: vmv1r.v v10, v8
873 ; CHECK-NEXT: vmv1r.v v11, v8
874 ; CHECK-NEXT: vmv1r.v v12, v8
875 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
876 ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
877 ; CHECK-NEXT: csrr a0, vl
878 ; CHECK-NEXT: sw a0, 0(a2)
881 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg6ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
882 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 1
883 %2 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 6
884 store i32 %2, ptr %outvl
885 ret <vscale x 4 x i16> %1
888 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg7ff.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr , i32)
889 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg7ff.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i32, i32)
891 define <vscale x 4 x i16> @test_vlseg7ff_nxv4i16(ptr %base, i32 %vl, ptr %outvl) {
892 ; CHECK-LABEL: test_vlseg7ff_nxv4i16:
893 ; CHECK: # %bb.0: # %entry
894 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
895 ; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
896 ; CHECK-NEXT: csrr a0, vl
897 ; CHECK-NEXT: sw a0, 0(a2)
900 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg7ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
901 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 1
902 %2 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 7
903 store i32 %2, ptr %outvl
904 ret <vscale x 4 x i16> %1
907 define <vscale x 4 x i16> @test_vlseg7ff_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
908 ; CHECK-LABEL: test_vlseg7ff_mask_nxv4i16:
909 ; CHECK: # %bb.0: # %entry
910 ; CHECK-NEXT: vmv1r.v v7, v8
911 ; CHECK-NEXT: vmv1r.v v9, v8
912 ; CHECK-NEXT: vmv1r.v v10, v8
913 ; CHECK-NEXT: vmv1r.v v11, v8
914 ; CHECK-NEXT: vmv1r.v v12, v8
915 ; CHECK-NEXT: vmv1r.v v13, v8
916 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
917 ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
918 ; CHECK-NEXT: csrr a0, vl
919 ; CHECK-NEXT: sw a0, 0(a2)
922 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg7ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
923 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 1
924 %2 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 7
925 store i32 %2, ptr %outvl
926 ret <vscale x 4 x i16> %1
929 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg8ff.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr , i32)
930 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg8ff.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, <vscale x 4 x i1>, i32, i32)
932 define <vscale x 4 x i16> @test_vlseg8ff_nxv4i16(ptr %base, i32 %vl, ptr %outvl) {
933 ; CHECK-LABEL: test_vlseg8ff_nxv4i16:
934 ; CHECK: # %bb.0: # %entry
935 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
936 ; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
937 ; CHECK-NEXT: csrr a0, vl
938 ; CHECK-NEXT: sw a0, 0(a2)
941 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg8ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
942 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 1
943 %2 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 8
944 store i32 %2, ptr %outvl
945 ret <vscale x 4 x i16> %1
948 define <vscale x 4 x i16> @test_vlseg8ff_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
949 ; CHECK-LABEL: test_vlseg8ff_mask_nxv4i16:
950 ; CHECK: # %bb.0: # %entry
951 ; CHECK-NEXT: vmv1r.v v7, v8
952 ; CHECK-NEXT: vmv1r.v v9, v8
953 ; CHECK-NEXT: vmv1r.v v10, v8
954 ; CHECK-NEXT: vmv1r.v v11, v8
955 ; CHECK-NEXT: vmv1r.v v12, v8
956 ; CHECK-NEXT: vmv1r.v v13, v8
957 ; CHECK-NEXT: vmv1r.v v14, v8
958 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
959 ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
960 ; CHECK-NEXT: csrr a0, vl
961 ; CHECK-NEXT: sw a0, 0(a2)
964 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg8ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
965 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 1
966 %2 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} %0, 8
967 store i32 %2, ptr %outvl
968 ret <vscale x 4 x i16> %1
971 declare {<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg2ff.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i32)
972 declare {<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i32, i32)
974 define <vscale x 1 x i32> @test_vlseg2ff_nxv1i32(ptr %base, i32 %vl, ptr %outvl) {
975 ; CHECK-LABEL: test_vlseg2ff_nxv1i32:
976 ; CHECK: # %bb.0: # %entry
977 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
978 ; CHECK-NEXT: vlseg2e32ff.v v7, (a0)
979 ; CHECK-NEXT: csrr a0, vl
980 ; CHECK-NEXT: sw a0, 0(a2)
983 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg2ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
984 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 1
985 %2 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 2
986 store i32 %2, ptr %outvl
987 ret <vscale x 1 x i32> %1
990 define <vscale x 1 x i32> @test_vlseg2ff_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
991 ; CHECK-LABEL: test_vlseg2ff_mask_nxv1i32:
992 ; CHECK: # %bb.0: # %entry
993 ; CHECK-NEXT: vmv1r.v v7, v8
994 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
995 ; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t
996 ; CHECK-NEXT: csrr a0, vl
997 ; CHECK-NEXT: sw a0, 0(a2)
1000 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
1001 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 1
1002 %2 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 2
1003 store i32 %2, ptr %outvl
1004 ret <vscale x 1 x i32> %1
1007 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg3ff.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i32)
1008 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg3ff.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i32, i32)
1010 define <vscale x 1 x i32> @test_vlseg3ff_nxv1i32(ptr %base, i32 %vl, ptr %outvl) {
1011 ; CHECK-LABEL: test_vlseg3ff_nxv1i32:
1012 ; CHECK: # %bb.0: # %entry
1013 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1014 ; CHECK-NEXT: vlseg3e32ff.v v7, (a0)
1015 ; CHECK-NEXT: csrr a0, vl
1016 ; CHECK-NEXT: sw a0, 0(a2)
1019 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg3ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
1020 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 1
1021 %2 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 3
1022 store i32 %2, ptr %outvl
1023 ret <vscale x 1 x i32> %1
1026 define <vscale x 1 x i32> @test_vlseg3ff_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
1027 ; CHECK-LABEL: test_vlseg3ff_mask_nxv1i32:
1028 ; CHECK: # %bb.0: # %entry
1029 ; CHECK-NEXT: vmv1r.v v7, v8
1030 ; CHECK-NEXT: vmv1r.v v9, v8
1031 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1032 ; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t
1033 ; CHECK-NEXT: csrr a0, vl
1034 ; CHECK-NEXT: sw a0, 0(a2)
1037 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg3ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
1038 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 1
1039 %2 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 3
1040 store i32 %2, ptr %outvl
1041 ret <vscale x 1 x i32> %1
1044 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg4ff.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i32)
1045 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg4ff.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i32, i32)
1047 define <vscale x 1 x i32> @test_vlseg4ff_nxv1i32(ptr %base, i32 %vl, ptr %outvl) {
1048 ; CHECK-LABEL: test_vlseg4ff_nxv1i32:
1049 ; CHECK: # %bb.0: # %entry
1050 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1051 ; CHECK-NEXT: vlseg4e32ff.v v7, (a0)
1052 ; CHECK-NEXT: csrr a0, vl
1053 ; CHECK-NEXT: sw a0, 0(a2)
1056 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg4ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
1057 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 1
1058 %2 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 4
1059 store i32 %2, ptr %outvl
1060 ret <vscale x 1 x i32> %1
1063 define <vscale x 1 x i32> @test_vlseg4ff_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
1064 ; CHECK-LABEL: test_vlseg4ff_mask_nxv1i32:
1065 ; CHECK: # %bb.0: # %entry
1066 ; CHECK-NEXT: vmv1r.v v7, v8
1067 ; CHECK-NEXT: vmv1r.v v9, v8
1068 ; CHECK-NEXT: vmv1r.v v10, v8
1069 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1070 ; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t
1071 ; CHECK-NEXT: csrr a0, vl
1072 ; CHECK-NEXT: sw a0, 0(a2)
1075 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg4ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
1076 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 1
1077 %2 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 4
1078 store i32 %2, ptr %outvl
1079 ret <vscale x 1 x i32> %1
1082 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg5ff.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i32)
1083 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg5ff.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i32, i32)
1085 define <vscale x 1 x i32> @test_vlseg5ff_nxv1i32(ptr %base, i32 %vl, ptr %outvl) {
1086 ; CHECK-LABEL: test_vlseg5ff_nxv1i32:
1087 ; CHECK: # %bb.0: # %entry
1088 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1089 ; CHECK-NEXT: vlseg5e32ff.v v7, (a0)
1090 ; CHECK-NEXT: csrr a0, vl
1091 ; CHECK-NEXT: sw a0, 0(a2)
1094 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg5ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
1095 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 1
1096 %2 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 5
1097 store i32 %2, ptr %outvl
1098 ret <vscale x 1 x i32> %1
1101 define <vscale x 1 x i32> @test_vlseg5ff_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
1102 ; CHECK-LABEL: test_vlseg5ff_mask_nxv1i32:
1103 ; CHECK: # %bb.0: # %entry
1104 ; CHECK-NEXT: vmv1r.v v7, v8
1105 ; CHECK-NEXT: vmv1r.v v9, v8
1106 ; CHECK-NEXT: vmv1r.v v10, v8
1107 ; CHECK-NEXT: vmv1r.v v11, v8
1108 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1109 ; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t
1110 ; CHECK-NEXT: csrr a0, vl
1111 ; CHECK-NEXT: sw a0, 0(a2)
1114 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg5ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
1115 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 1
1116 %2 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 5
1117 store i32 %2, ptr %outvl
1118 ret <vscale x 1 x i32> %1
1121 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg6ff.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i32)
1122 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg6ff.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i32, i32)
1124 define <vscale x 1 x i32> @test_vlseg6ff_nxv1i32(ptr %base, i32 %vl, ptr %outvl) {
1125 ; CHECK-LABEL: test_vlseg6ff_nxv1i32:
1126 ; CHECK: # %bb.0: # %entry
1127 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1128 ; CHECK-NEXT: vlseg6e32ff.v v7, (a0)
1129 ; CHECK-NEXT: csrr a0, vl
1130 ; CHECK-NEXT: sw a0, 0(a2)
1133 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg6ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
1134 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 1
1135 %2 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 6
1136 store i32 %2, ptr %outvl
1137 ret <vscale x 1 x i32> %1
1140 define <vscale x 1 x i32> @test_vlseg6ff_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
1141 ; CHECK-LABEL: test_vlseg6ff_mask_nxv1i32:
1142 ; CHECK: # %bb.0: # %entry
1143 ; CHECK-NEXT: vmv1r.v v7, v8
1144 ; CHECK-NEXT: vmv1r.v v9, v8
1145 ; CHECK-NEXT: vmv1r.v v10, v8
1146 ; CHECK-NEXT: vmv1r.v v11, v8
1147 ; CHECK-NEXT: vmv1r.v v12, v8
1148 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1149 ; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t
1150 ; CHECK-NEXT: csrr a0, vl
1151 ; CHECK-NEXT: sw a0, 0(a2)
1154 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg6ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
1155 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 1
1156 %2 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 6
1157 store i32 %2, ptr %outvl
1158 ret <vscale x 1 x i32> %1
1161 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg7ff.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i32)
1162 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg7ff.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i32, i32)
1164 define <vscale x 1 x i32> @test_vlseg7ff_nxv1i32(ptr %base, i32 %vl, ptr %outvl) {
1165 ; CHECK-LABEL: test_vlseg7ff_nxv1i32:
1166 ; CHECK: # %bb.0: # %entry
1167 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1168 ; CHECK-NEXT: vlseg7e32ff.v v7, (a0)
1169 ; CHECK-NEXT: csrr a0, vl
1170 ; CHECK-NEXT: sw a0, 0(a2)
1173 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg7ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
1174 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 1
1175 %2 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 7
1176 store i32 %2, ptr %outvl
1177 ret <vscale x 1 x i32> %1
1180 define <vscale x 1 x i32> @test_vlseg7ff_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
1181 ; CHECK-LABEL: test_vlseg7ff_mask_nxv1i32:
1182 ; CHECK: # %bb.0: # %entry
1183 ; CHECK-NEXT: vmv1r.v v7, v8
1184 ; CHECK-NEXT: vmv1r.v v9, v8
1185 ; CHECK-NEXT: vmv1r.v v10, v8
1186 ; CHECK-NEXT: vmv1r.v v11, v8
1187 ; CHECK-NEXT: vmv1r.v v12, v8
1188 ; CHECK-NEXT: vmv1r.v v13, v8
1189 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1190 ; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t
1191 ; CHECK-NEXT: csrr a0, vl
1192 ; CHECK-NEXT: sw a0, 0(a2)
1195 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg7ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
1196 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 1
1197 %2 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 7
1198 store i32 %2, ptr %outvl
1199 ret <vscale x 1 x i32> %1
1202 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg8ff.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i32)
1203 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg8ff.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, <vscale x 1 x i1>, i32, i32)
1205 define <vscale x 1 x i32> @test_vlseg8ff_nxv1i32(ptr %base, i32 %vl, ptr %outvl) {
1206 ; CHECK-LABEL: test_vlseg8ff_nxv1i32:
1207 ; CHECK: # %bb.0: # %entry
1208 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
1209 ; CHECK-NEXT: vlseg8e32ff.v v7, (a0)
1210 ; CHECK-NEXT: csrr a0, vl
1211 ; CHECK-NEXT: sw a0, 0(a2)
1214 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg8ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
1215 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 1
1216 %2 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 8
1217 store i32 %2, ptr %outvl
1218 ret <vscale x 1 x i32> %1
1221 define <vscale x 1 x i32> @test_vlseg8ff_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
1222 ; CHECK-LABEL: test_vlseg8ff_mask_nxv1i32:
1223 ; CHECK: # %bb.0: # %entry
1224 ; CHECK-NEXT: vmv1r.v v7, v8
1225 ; CHECK-NEXT: vmv1r.v v9, v8
1226 ; CHECK-NEXT: vmv1r.v v10, v8
1227 ; CHECK-NEXT: vmv1r.v v11, v8
1228 ; CHECK-NEXT: vmv1r.v v12, v8
1229 ; CHECK-NEXT: vmv1r.v v13, v8
1230 ; CHECK-NEXT: vmv1r.v v14, v8
1231 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
1232 ; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t
1233 ; CHECK-NEXT: csrr a0, vl
1234 ; CHECK-NEXT: sw a0, 0(a2)
1237 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg8ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
1238 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 1
1239 %2 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} %0, 8
1240 store i32 %2, ptr %outvl
1241 ret <vscale x 1 x i32> %1
1244 declare {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr , i32)
1245 declare {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i1>, i32, i32)
1247 define <vscale x 8 x i16> @test_vlseg2ff_nxv8i16(ptr %base, i32 %vl, ptr %outvl) {
1248 ; CHECK-LABEL: test_vlseg2ff_nxv8i16:
1249 ; CHECK: # %bb.0: # %entry
1250 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1251 ; CHECK-NEXT: vlseg2e16ff.v v6, (a0)
1252 ; CHECK-NEXT: csrr a0, vl
1253 ; CHECK-NEXT: sw a0, 0(a2)
1256 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %vl)
1257 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} %0, 1
1258 %2 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} %0, 2
1259 store i32 %2, ptr %outvl
1260 ret <vscale x 8 x i16> %1
1263 define <vscale x 8 x i16> @test_vlseg2ff_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
1264 ; CHECK-LABEL: test_vlseg2ff_mask_nxv8i16:
1265 ; CHECK: # %bb.0: # %entry
1266 ; CHECK-NEXT: vmv2r.v v6, v8
1267 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
1268 ; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t
1269 ; CHECK-NEXT: csrr a0, vl
1270 ; CHECK-NEXT: sw a0, 0(a2)
1273 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
1274 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} %0, 1
1275 %2 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} %0, 2
1276 store i32 %2, ptr %outvl
1277 ret <vscale x 8 x i16> %1
1280 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg3ff.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr , i32)
1281 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg3ff.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i1>, i32, i32)
1283 define <vscale x 8 x i16> @test_vlseg3ff_nxv8i16(ptr %base, i32 %vl, ptr %outvl) {
1284 ; CHECK-LABEL: test_vlseg3ff_nxv8i16:
1285 ; CHECK: # %bb.0: # %entry
1286 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1287 ; CHECK-NEXT: vlseg3e16ff.v v6, (a0)
1288 ; CHECK-NEXT: csrr a0, vl
1289 ; CHECK-NEXT: sw a0, 0(a2)
1292 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg3ff.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %vl)
1293 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} %0, 1
1294 %2 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} %0, 3
1295 store i32 %2, ptr %outvl
1296 ret <vscale x 8 x i16> %1
1299 define <vscale x 8 x i16> @test_vlseg3ff_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
1300 ; CHECK-LABEL: test_vlseg3ff_mask_nxv8i16:
1301 ; CHECK: # %bb.0: # %entry
1302 ; CHECK-NEXT: vmv2r.v v6, v8
1303 ; CHECK-NEXT: vmv2r.v v10, v8
1304 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
1305 ; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t
1306 ; CHECK-NEXT: csrr a0, vl
1307 ; CHECK-NEXT: sw a0, 0(a2)
1310 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg3ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
1311 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} %0, 1
1312 %2 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} %0, 3
1313 store i32 %2, ptr %outvl
1314 ret <vscale x 8 x i16> %1
1317 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg4ff.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr , i32)
1318 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg4ff.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, <vscale x 8 x i1>, i32, i32)
1320 define <vscale x 8 x i16> @test_vlseg4ff_nxv8i16(ptr %base, i32 %vl, ptr %outvl) {
1321 ; CHECK-LABEL: test_vlseg4ff_nxv8i16:
1322 ; CHECK: # %bb.0: # %entry
1323 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
1324 ; CHECK-NEXT: vlseg4e16ff.v v6, (a0)
1325 ; CHECK-NEXT: csrr a0, vl
1326 ; CHECK-NEXT: sw a0, 0(a2)
1329 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg4ff.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %vl)
1330 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} %0, 1
1331 %2 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} %0, 4
1332 store i32 %2, ptr %outvl
1333 ret <vscale x 8 x i16> %1
1336 define <vscale x 8 x i16> @test_vlseg4ff_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
1337 ; CHECK-LABEL: test_vlseg4ff_mask_nxv8i16:
1338 ; CHECK: # %bb.0: # %entry
1339 ; CHECK-NEXT: vmv2r.v v6, v8
1340 ; CHECK-NEXT: vmv2r.v v10, v8
1341 ; CHECK-NEXT: vmv2r.v v12, v8
1342 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
1343 ; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t
1344 ; CHECK-NEXT: csrr a0, vl
1345 ; CHECK-NEXT: sw a0, 0(a2)
1348 %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg4ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
1349 %1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} %0, 1
1350 %2 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} %0, 4
1351 store i32 %2, ptr %outvl
1352 ret <vscale x 8 x i16> %1
1355 declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr , i32)
1356 declare {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i32, i32)
1358 define <vscale x 8 x i8> @test_vlseg2ff_nxv8i8(ptr %base, i32 %vl, ptr %outvl) {
1359 ; CHECK-LABEL: test_vlseg2ff_nxv8i8:
1360 ; CHECK: # %bb.0: # %entry
1361 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1362 ; CHECK-NEXT: vlseg2e8ff.v v7, (a0)
1363 ; CHECK-NEXT: csrr a0, vl
1364 ; CHECK-NEXT: sw a0, 0(a2)
1367 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
1368 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 1
1369 %2 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 2
1370 store i32 %2, ptr %outvl
1371 ret <vscale x 8 x i8> %1
1374 define <vscale x 8 x i8> @test_vlseg2ff_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
1375 ; CHECK-LABEL: test_vlseg2ff_mask_nxv8i8:
1376 ; CHECK: # %bb.0: # %entry
1377 ; CHECK-NEXT: vmv1r.v v7, v8
1378 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
1379 ; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t
1380 ; CHECK-NEXT: csrr a0, vl
1381 ; CHECK-NEXT: sw a0, 0(a2)
1384 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
1385 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 1
1386 %2 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 2
1387 store i32 %2, ptr %outvl
1388 ret <vscale x 8 x i8> %1
1391 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg3ff.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr , i32)
1392 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i32, i32)
1394 define <vscale x 8 x i8> @test_vlseg3ff_nxv8i8(ptr %base, i32 %vl, ptr %outvl) {
1395 ; CHECK-LABEL: test_vlseg3ff_nxv8i8:
1396 ; CHECK: # %bb.0: # %entry
1397 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1398 ; CHECK-NEXT: vlseg3e8ff.v v7, (a0)
1399 ; CHECK-NEXT: csrr a0, vl
1400 ; CHECK-NEXT: sw a0, 0(a2)
1403 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg3ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
1404 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 1
1405 %2 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 3
1406 store i32 %2, ptr %outvl
1407 ret <vscale x 8 x i8> %1
1410 define <vscale x 8 x i8> @test_vlseg3ff_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
1411 ; CHECK-LABEL: test_vlseg3ff_mask_nxv8i8:
1412 ; CHECK: # %bb.0: # %entry
1413 ; CHECK-NEXT: vmv1r.v v7, v8
1414 ; CHECK-NEXT: vmv1r.v v9, v8
1415 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
1416 ; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t
1417 ; CHECK-NEXT: csrr a0, vl
1418 ; CHECK-NEXT: sw a0, 0(a2)
1421 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
1422 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 1
1423 %2 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 3
1424 store i32 %2, ptr %outvl
1425 ret <vscale x 8 x i8> %1
1428 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg4ff.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr , i32)
1429 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i32, i32)
1431 define <vscale x 8 x i8> @test_vlseg4ff_nxv8i8(ptr %base, i32 %vl, ptr %outvl) {
1432 ; CHECK-LABEL: test_vlseg4ff_nxv8i8:
1433 ; CHECK: # %bb.0: # %entry
1434 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1435 ; CHECK-NEXT: vlseg4e8ff.v v7, (a0)
1436 ; CHECK-NEXT: csrr a0, vl
1437 ; CHECK-NEXT: sw a0, 0(a2)
1440 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg4ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
1441 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 1
1442 %2 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 4
1443 store i32 %2, ptr %outvl
1444 ret <vscale x 8 x i8> %1
1447 define <vscale x 8 x i8> @test_vlseg4ff_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
1448 ; CHECK-LABEL: test_vlseg4ff_mask_nxv8i8:
1449 ; CHECK: # %bb.0: # %entry
1450 ; CHECK-NEXT: vmv1r.v v7, v8
1451 ; CHECK-NEXT: vmv1r.v v9, v8
1452 ; CHECK-NEXT: vmv1r.v v10, v8
1453 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
1454 ; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t
1455 ; CHECK-NEXT: csrr a0, vl
1456 ; CHECK-NEXT: sw a0, 0(a2)
1459 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
1460 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 1
1461 %2 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 4
1462 store i32 %2, ptr %outvl
1463 ret <vscale x 8 x i8> %1
1466 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg5ff.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr , i32)
1467 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg5ff.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i32, i32)
1469 define <vscale x 8 x i8> @test_vlseg5ff_nxv8i8(ptr %base, i32 %vl, ptr %outvl) {
1470 ; CHECK-LABEL: test_vlseg5ff_nxv8i8:
1471 ; CHECK: # %bb.0: # %entry
1472 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1473 ; CHECK-NEXT: vlseg5e8ff.v v7, (a0)
1474 ; CHECK-NEXT: csrr a0, vl
1475 ; CHECK-NEXT: sw a0, 0(a2)
1478 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg5ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
1479 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 1
1480 %2 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 5
1481 store i32 %2, ptr %outvl
1482 ret <vscale x 8 x i8> %1
1485 define <vscale x 8 x i8> @test_vlseg5ff_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
1486 ; CHECK-LABEL: test_vlseg5ff_mask_nxv8i8:
1487 ; CHECK: # %bb.0: # %entry
1488 ; CHECK-NEXT: vmv1r.v v7, v8
1489 ; CHECK-NEXT: vmv1r.v v9, v8
1490 ; CHECK-NEXT: vmv1r.v v10, v8
1491 ; CHECK-NEXT: vmv1r.v v11, v8
1492 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
1493 ; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t
1494 ; CHECK-NEXT: csrr a0, vl
1495 ; CHECK-NEXT: sw a0, 0(a2)
1498 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg5ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
1499 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 1
1500 %2 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 5
1501 store i32 %2, ptr %outvl
1502 ret <vscale x 8 x i8> %1
1505 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg6ff.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr , i32)
1506 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg6ff.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i32, i32)
1508 define <vscale x 8 x i8> @test_vlseg6ff_nxv8i8(ptr %base, i32 %vl, ptr %outvl) {
1509 ; CHECK-LABEL: test_vlseg6ff_nxv8i8:
1510 ; CHECK: # %bb.0: # %entry
1511 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1512 ; CHECK-NEXT: vlseg6e8ff.v v7, (a0)
1513 ; CHECK-NEXT: csrr a0, vl
1514 ; CHECK-NEXT: sw a0, 0(a2)
1517 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg6ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
1518 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 1
1519 %2 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 6
1520 store i32 %2, ptr %outvl
1521 ret <vscale x 8 x i8> %1
1524 define <vscale x 8 x i8> @test_vlseg6ff_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
1525 ; CHECK-LABEL: test_vlseg6ff_mask_nxv8i8:
1526 ; CHECK: # %bb.0: # %entry
1527 ; CHECK-NEXT: vmv1r.v v7, v8
1528 ; CHECK-NEXT: vmv1r.v v9, v8
1529 ; CHECK-NEXT: vmv1r.v v10, v8
1530 ; CHECK-NEXT: vmv1r.v v11, v8
1531 ; CHECK-NEXT: vmv1r.v v12, v8
1532 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
1533 ; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t
1534 ; CHECK-NEXT: csrr a0, vl
1535 ; CHECK-NEXT: sw a0, 0(a2)
1538 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg6ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
1539 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 1
1540 %2 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 6
1541 store i32 %2, ptr %outvl
1542 ret <vscale x 8 x i8> %1
1545 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg7ff.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr , i32)
1546 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg7ff.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i32, i32)
1548 define <vscale x 8 x i8> @test_vlseg7ff_nxv8i8(ptr %base, i32 %vl, ptr %outvl) {
1549 ; CHECK-LABEL: test_vlseg7ff_nxv8i8:
1550 ; CHECK: # %bb.0: # %entry
1551 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1552 ; CHECK-NEXT: vlseg7e8ff.v v7, (a0)
1553 ; CHECK-NEXT: csrr a0, vl
1554 ; CHECK-NEXT: sw a0, 0(a2)
1557 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg7ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
1558 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 1
1559 %2 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 7
1560 store i32 %2, ptr %outvl
1561 ret <vscale x 8 x i8> %1
1564 define <vscale x 8 x i8> @test_vlseg7ff_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
1565 ; CHECK-LABEL: test_vlseg7ff_mask_nxv8i8:
1566 ; CHECK: # %bb.0: # %entry
1567 ; CHECK-NEXT: vmv1r.v v7, v8
1568 ; CHECK-NEXT: vmv1r.v v9, v8
1569 ; CHECK-NEXT: vmv1r.v v10, v8
1570 ; CHECK-NEXT: vmv1r.v v11, v8
1571 ; CHECK-NEXT: vmv1r.v v12, v8
1572 ; CHECK-NEXT: vmv1r.v v13, v8
1573 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
1574 ; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t
1575 ; CHECK-NEXT: csrr a0, vl
1576 ; CHECK-NEXT: sw a0, 0(a2)
1579 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg7ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
1580 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 1
1581 %2 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 7
1582 store i32 %2, ptr %outvl
1583 ret <vscale x 8 x i8> %1
1586 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg8ff.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr , i32)
1587 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg8ff.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i32, i32)
1589 define <vscale x 8 x i8> @test_vlseg8ff_nxv8i8(ptr %base, i32 %vl, ptr %outvl) {
1590 ; CHECK-LABEL: test_vlseg8ff_nxv8i8:
1591 ; CHECK: # %bb.0: # %entry
1592 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
1593 ; CHECK-NEXT: vlseg8e8ff.v v7, (a0)
1594 ; CHECK-NEXT: csrr a0, vl
1595 ; CHECK-NEXT: sw a0, 0(a2)
1598 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg8ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
1599 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 1
1600 %2 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 8
1601 store i32 %2, ptr %outvl
1602 ret <vscale x 8 x i8> %1
1605 define <vscale x 8 x i8> @test_vlseg8ff_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
1606 ; CHECK-LABEL: test_vlseg8ff_mask_nxv8i8:
1607 ; CHECK: # %bb.0: # %entry
1608 ; CHECK-NEXT: vmv1r.v v7, v8
1609 ; CHECK-NEXT: vmv1r.v v9, v8
1610 ; CHECK-NEXT: vmv1r.v v10, v8
1611 ; CHECK-NEXT: vmv1r.v v11, v8
1612 ; CHECK-NEXT: vmv1r.v v12, v8
1613 ; CHECK-NEXT: vmv1r.v v13, v8
1614 ; CHECK-NEXT: vmv1r.v v14, v8
1615 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
1616 ; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t
1617 ; CHECK-NEXT: csrr a0, vl
1618 ; CHECK-NEXT: sw a0, 0(a2)
1621 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg8ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
1622 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 1
1623 %2 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} %0, 8
1624 store i32 %2, ptr %outvl
1625 ret <vscale x 8 x i8> %1
1628 declare {<vscale x 8 x i32>,<vscale x 8 x i32>, i32} @llvm.riscv.vlseg2ff.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr , i32)
1629 declare {<vscale x 8 x i32>,<vscale x 8 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr, <vscale x 8 x i1>, i32, i32)
1631 define <vscale x 8 x i32> @test_vlseg2ff_nxv8i32(ptr %base, i32 %vl, ptr %outvl) {
1632 ; CHECK-LABEL: test_vlseg2ff_nxv8i32:
1633 ; CHECK: # %bb.0: # %entry
1634 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
1635 ; CHECK-NEXT: vlseg2e32ff.v v4, (a0)
1636 ; CHECK-NEXT: csrr a0, vl
1637 ; CHECK-NEXT: sw a0, 0(a2)
1640 %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>, i32} @llvm.riscv.vlseg2ff.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i32 %vl)
1641 %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>, i32} %0, 1
1642 %2 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>, i32} %0, 2
1643 store i32 %2, ptr %outvl
1644 ret <vscale x 8 x i32> %1
1647 define <vscale x 8 x i32> @test_vlseg2ff_mask_nxv8i32(<vscale x 8 x i32> %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
1648 ; CHECK-LABEL: test_vlseg2ff_mask_nxv8i32:
1649 ; CHECK: # %bb.0: # %entry
1650 ; CHECK-NEXT: vmv4r.v v4, v8
1651 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
1652 ; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t
1653 ; CHECK-NEXT: csrr a0, vl
1654 ; CHECK-NEXT: sw a0, 0(a2)
1657 %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
1658 %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>, i32} %0, 1
1659 %2 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>, i32} %0, 2
1660 store i32 %2, ptr %outvl
1661 ret <vscale x 8 x i32> %1
1664 declare {<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg2ff.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr , i32)
1665 declare {<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i32, i32)
1667 define <vscale x 4 x i8> @test_vlseg2ff_nxv4i8(ptr %base, i32 %vl, ptr %outvl) {
1668 ; CHECK-LABEL: test_vlseg2ff_nxv4i8:
1669 ; CHECK: # %bb.0: # %entry
1670 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1671 ; CHECK-NEXT: vlseg2e8ff.v v7, (a0)
1672 ; CHECK-NEXT: csrr a0, vl
1673 ; CHECK-NEXT: sw a0, 0(a2)
1676 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg2ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
1677 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 1
1678 %2 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 2
1679 store i32 %2, ptr %outvl
1680 ret <vscale x 4 x i8> %1
1683 define <vscale x 4 x i8> @test_vlseg2ff_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
1684 ; CHECK-LABEL: test_vlseg2ff_mask_nxv4i8:
1685 ; CHECK: # %bb.0: # %entry
1686 ; CHECK-NEXT: vmv1r.v v7, v8
1687 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
1688 ; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t
1689 ; CHECK-NEXT: csrr a0, vl
1690 ; CHECK-NEXT: sw a0, 0(a2)
1693 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
1694 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 1
1695 %2 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 2
1696 store i32 %2, ptr %outvl
1697 ret <vscale x 4 x i8> %1
1700 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg3ff.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr , i32)
1701 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i32, i32)
1703 define <vscale x 4 x i8> @test_vlseg3ff_nxv4i8(ptr %base, i32 %vl, ptr %outvl) {
1704 ; CHECK-LABEL: test_vlseg3ff_nxv4i8:
1705 ; CHECK: # %bb.0: # %entry
1706 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1707 ; CHECK-NEXT: vlseg3e8ff.v v7, (a0)
1708 ; CHECK-NEXT: csrr a0, vl
1709 ; CHECK-NEXT: sw a0, 0(a2)
1712 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg3ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
1713 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 1
1714 %2 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 3
1715 store i32 %2, ptr %outvl
1716 ret <vscale x 4 x i8> %1
1719 define <vscale x 4 x i8> @test_vlseg3ff_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
1720 ; CHECK-LABEL: test_vlseg3ff_mask_nxv4i8:
1721 ; CHECK: # %bb.0: # %entry
1722 ; CHECK-NEXT: vmv1r.v v7, v8
1723 ; CHECK-NEXT: vmv1r.v v9, v8
1724 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
1725 ; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t
1726 ; CHECK-NEXT: csrr a0, vl
1727 ; CHECK-NEXT: sw a0, 0(a2)
1730 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
1731 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 1
1732 %2 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 3
1733 store i32 %2, ptr %outvl
1734 ret <vscale x 4 x i8> %1
1737 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg4ff.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr , i32)
1738 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i32, i32)
1740 define <vscale x 4 x i8> @test_vlseg4ff_nxv4i8(ptr %base, i32 %vl, ptr %outvl) {
1741 ; CHECK-LABEL: test_vlseg4ff_nxv4i8:
1742 ; CHECK: # %bb.0: # %entry
1743 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1744 ; CHECK-NEXT: vlseg4e8ff.v v7, (a0)
1745 ; CHECK-NEXT: csrr a0, vl
1746 ; CHECK-NEXT: sw a0, 0(a2)
1749 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg4ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
1750 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 1
1751 %2 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 4
1752 store i32 %2, ptr %outvl
1753 ret <vscale x 4 x i8> %1
1756 define <vscale x 4 x i8> @test_vlseg4ff_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
1757 ; CHECK-LABEL: test_vlseg4ff_mask_nxv4i8:
1758 ; CHECK: # %bb.0: # %entry
1759 ; CHECK-NEXT: vmv1r.v v7, v8
1760 ; CHECK-NEXT: vmv1r.v v9, v8
1761 ; CHECK-NEXT: vmv1r.v v10, v8
1762 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
1763 ; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t
1764 ; CHECK-NEXT: csrr a0, vl
1765 ; CHECK-NEXT: sw a0, 0(a2)
1768 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
1769 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 1
1770 %2 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 4
1771 store i32 %2, ptr %outvl
1772 ret <vscale x 4 x i8> %1
1775 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg5ff.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr , i32)
1776 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg5ff.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i32, i32)
1778 define <vscale x 4 x i8> @test_vlseg5ff_nxv4i8(ptr %base, i32 %vl, ptr %outvl) {
1779 ; CHECK-LABEL: test_vlseg5ff_nxv4i8:
1780 ; CHECK: # %bb.0: # %entry
1781 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1782 ; CHECK-NEXT: vlseg5e8ff.v v7, (a0)
1783 ; CHECK-NEXT: csrr a0, vl
1784 ; CHECK-NEXT: sw a0, 0(a2)
1787 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg5ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
1788 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 1
1789 %2 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 5
1790 store i32 %2, ptr %outvl
1791 ret <vscale x 4 x i8> %1
1794 define <vscale x 4 x i8> @test_vlseg5ff_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
1795 ; CHECK-LABEL: test_vlseg5ff_mask_nxv4i8:
1796 ; CHECK: # %bb.0: # %entry
1797 ; CHECK-NEXT: vmv1r.v v7, v8
1798 ; CHECK-NEXT: vmv1r.v v9, v8
1799 ; CHECK-NEXT: vmv1r.v v10, v8
1800 ; CHECK-NEXT: vmv1r.v v11, v8
1801 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
1802 ; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t
1803 ; CHECK-NEXT: csrr a0, vl
1804 ; CHECK-NEXT: sw a0, 0(a2)
1807 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg5ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
1808 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 1
1809 %2 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 5
1810 store i32 %2, ptr %outvl
1811 ret <vscale x 4 x i8> %1
1814 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg6ff.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr , i32)
1815 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg6ff.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i32, i32)
1817 define <vscale x 4 x i8> @test_vlseg6ff_nxv4i8(ptr %base, i32 %vl, ptr %outvl) {
1818 ; CHECK-LABEL: test_vlseg6ff_nxv4i8:
1819 ; CHECK: # %bb.0: # %entry
1820 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1821 ; CHECK-NEXT: vlseg6e8ff.v v7, (a0)
1822 ; CHECK-NEXT: csrr a0, vl
1823 ; CHECK-NEXT: sw a0, 0(a2)
1826 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg6ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
1827 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 1
1828 %2 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 6
1829 store i32 %2, ptr %outvl
1830 ret <vscale x 4 x i8> %1
1833 define <vscale x 4 x i8> @test_vlseg6ff_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
1834 ; CHECK-LABEL: test_vlseg6ff_mask_nxv4i8:
1835 ; CHECK: # %bb.0: # %entry
1836 ; CHECK-NEXT: vmv1r.v v7, v8
1837 ; CHECK-NEXT: vmv1r.v v9, v8
1838 ; CHECK-NEXT: vmv1r.v v10, v8
1839 ; CHECK-NEXT: vmv1r.v v11, v8
1840 ; CHECK-NEXT: vmv1r.v v12, v8
1841 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
1842 ; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t
1843 ; CHECK-NEXT: csrr a0, vl
1844 ; CHECK-NEXT: sw a0, 0(a2)
1847 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg6ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
1848 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 1
1849 %2 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 6
1850 store i32 %2, ptr %outvl
1851 ret <vscale x 4 x i8> %1
1854 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg7ff.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr , i32)
1855 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg7ff.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i32, i32)
1857 define <vscale x 4 x i8> @test_vlseg7ff_nxv4i8(ptr %base, i32 %vl, ptr %outvl) {
1858 ; CHECK-LABEL: test_vlseg7ff_nxv4i8:
1859 ; CHECK: # %bb.0: # %entry
1860 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1861 ; CHECK-NEXT: vlseg7e8ff.v v7, (a0)
1862 ; CHECK-NEXT: csrr a0, vl
1863 ; CHECK-NEXT: sw a0, 0(a2)
1866 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg7ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
1867 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 1
1868 %2 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 7
1869 store i32 %2, ptr %outvl
1870 ret <vscale x 4 x i8> %1
1873 define <vscale x 4 x i8> @test_vlseg7ff_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
1874 ; CHECK-LABEL: test_vlseg7ff_mask_nxv4i8:
1875 ; CHECK: # %bb.0: # %entry
1876 ; CHECK-NEXT: vmv1r.v v7, v8
1877 ; CHECK-NEXT: vmv1r.v v9, v8
1878 ; CHECK-NEXT: vmv1r.v v10, v8
1879 ; CHECK-NEXT: vmv1r.v v11, v8
1880 ; CHECK-NEXT: vmv1r.v v12, v8
1881 ; CHECK-NEXT: vmv1r.v v13, v8
1882 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
1883 ; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t
1884 ; CHECK-NEXT: csrr a0, vl
1885 ; CHECK-NEXT: sw a0, 0(a2)
1888 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg7ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
1889 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 1
1890 %2 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 7
1891 store i32 %2, ptr %outvl
1892 ret <vscale x 4 x i8> %1
1895 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg8ff.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr , i32)
1896 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg8ff.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i32, i32)
1898 define <vscale x 4 x i8> @test_vlseg8ff_nxv4i8(ptr %base, i32 %vl, ptr %outvl) {
1899 ; CHECK-LABEL: test_vlseg8ff_nxv4i8:
1900 ; CHECK: # %bb.0: # %entry
1901 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
1902 ; CHECK-NEXT: vlseg8e8ff.v v7, (a0)
1903 ; CHECK-NEXT: csrr a0, vl
1904 ; CHECK-NEXT: sw a0, 0(a2)
1907 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg8ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
1908 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 1
1909 %2 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 8
1910 store i32 %2, ptr %outvl
1911 ret <vscale x 4 x i8> %1
1914 define <vscale x 4 x i8> @test_vlseg8ff_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
1915 ; CHECK-LABEL: test_vlseg8ff_mask_nxv4i8:
1916 ; CHECK: # %bb.0: # %entry
1917 ; CHECK-NEXT: vmv1r.v v7, v8
1918 ; CHECK-NEXT: vmv1r.v v9, v8
1919 ; CHECK-NEXT: vmv1r.v v10, v8
1920 ; CHECK-NEXT: vmv1r.v v11, v8
1921 ; CHECK-NEXT: vmv1r.v v12, v8
1922 ; CHECK-NEXT: vmv1r.v v13, v8
1923 ; CHECK-NEXT: vmv1r.v v14, v8
1924 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
1925 ; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t
1926 ; CHECK-NEXT: csrr a0, vl
1927 ; CHECK-NEXT: sw a0, 0(a2)
1930 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg8ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
1931 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 1
1932 %2 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} %0, 8
1933 store i32 %2, ptr %outvl
1934 ret <vscale x 4 x i8> %1
1937 declare {<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg2ff.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr , i32)
1938 declare {<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i1>, i32, i32)
1940 define <vscale x 1 x i16> @test_vlseg2ff_nxv1i16(ptr %base, i32 %vl, ptr %outvl) {
1941 ; CHECK-LABEL: test_vlseg2ff_nxv1i16:
1942 ; CHECK: # %bb.0: # %entry
1943 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1944 ; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
1945 ; CHECK-NEXT: csrr a0, vl
1946 ; CHECK-NEXT: sw a0, 0(a2)
1949 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg2ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
1950 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 1
1951 %2 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 2
1952 store i32 %2, ptr %outvl
1953 ret <vscale x 1 x i16> %1
1956 define <vscale x 1 x i16> @test_vlseg2ff_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
1957 ; CHECK-LABEL: test_vlseg2ff_mask_nxv1i16:
1958 ; CHECK: # %bb.0: # %entry
1959 ; CHECK-NEXT: vmv1r.v v7, v8
1960 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
1961 ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
1962 ; CHECK-NEXT: csrr a0, vl
1963 ; CHECK-NEXT: sw a0, 0(a2)
1966 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
1967 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 1
1968 %2 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 2
1969 store i32 %2, ptr %outvl
1970 ret <vscale x 1 x i16> %1
1973 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg3ff.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr , i32)
1974 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg3ff.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i1>, i32, i32)
1976 define <vscale x 1 x i16> @test_vlseg3ff_nxv1i16(ptr %base, i32 %vl, ptr %outvl) {
1977 ; CHECK-LABEL: test_vlseg3ff_nxv1i16:
1978 ; CHECK: # %bb.0: # %entry
1979 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
1980 ; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
1981 ; CHECK-NEXT: csrr a0, vl
1982 ; CHECK-NEXT: sw a0, 0(a2)
1985 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg3ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
1986 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 1
1987 %2 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 3
1988 store i32 %2, ptr %outvl
1989 ret <vscale x 1 x i16> %1
1992 define <vscale x 1 x i16> @test_vlseg3ff_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
1993 ; CHECK-LABEL: test_vlseg3ff_mask_nxv1i16:
1994 ; CHECK: # %bb.0: # %entry
1995 ; CHECK-NEXT: vmv1r.v v7, v8
1996 ; CHECK-NEXT: vmv1r.v v9, v8
1997 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
1998 ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
1999 ; CHECK-NEXT: csrr a0, vl
2000 ; CHECK-NEXT: sw a0, 0(a2)
2003 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg3ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2004 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 1
2005 %2 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 3
2006 store i32 %2, ptr %outvl
2007 ret <vscale x 1 x i16> %1
2010 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg4ff.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr , i32)
2011 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg4ff.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i1>, i32, i32)
2013 define <vscale x 1 x i16> @test_vlseg4ff_nxv1i16(ptr %base, i32 %vl, ptr %outvl) {
2014 ; CHECK-LABEL: test_vlseg4ff_nxv1i16:
2015 ; CHECK: # %bb.0: # %entry
2016 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2017 ; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
2018 ; CHECK-NEXT: csrr a0, vl
2019 ; CHECK-NEXT: sw a0, 0(a2)
2022 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg4ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
2023 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 1
2024 %2 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 4
2025 store i32 %2, ptr %outvl
2026 ret <vscale x 1 x i16> %1
2029 define <vscale x 1 x i16> @test_vlseg4ff_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
2030 ; CHECK-LABEL: test_vlseg4ff_mask_nxv1i16:
2031 ; CHECK: # %bb.0: # %entry
2032 ; CHECK-NEXT: vmv1r.v v7, v8
2033 ; CHECK-NEXT: vmv1r.v v9, v8
2034 ; CHECK-NEXT: vmv1r.v v10, v8
2035 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
2036 ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
2037 ; CHECK-NEXT: csrr a0, vl
2038 ; CHECK-NEXT: sw a0, 0(a2)
2041 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg4ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2042 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 1
2043 %2 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 4
2044 store i32 %2, ptr %outvl
2045 ret <vscale x 1 x i16> %1
2048 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg5ff.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr , i32)
2049 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg5ff.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i1>, i32, i32)
2051 define <vscale x 1 x i16> @test_vlseg5ff_nxv1i16(ptr %base, i32 %vl, ptr %outvl) {
2052 ; CHECK-LABEL: test_vlseg5ff_nxv1i16:
2053 ; CHECK: # %bb.0: # %entry
2054 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2055 ; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
2056 ; CHECK-NEXT: csrr a0, vl
2057 ; CHECK-NEXT: sw a0, 0(a2)
2060 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg5ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
2061 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 1
2062 %2 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 5
2063 store i32 %2, ptr %outvl
2064 ret <vscale x 1 x i16> %1
2067 define <vscale x 1 x i16> @test_vlseg5ff_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
2068 ; CHECK-LABEL: test_vlseg5ff_mask_nxv1i16:
2069 ; CHECK: # %bb.0: # %entry
2070 ; CHECK-NEXT: vmv1r.v v7, v8
2071 ; CHECK-NEXT: vmv1r.v v9, v8
2072 ; CHECK-NEXT: vmv1r.v v10, v8
2073 ; CHECK-NEXT: vmv1r.v v11, v8
2074 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
2075 ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
2076 ; CHECK-NEXT: csrr a0, vl
2077 ; CHECK-NEXT: sw a0, 0(a2)
2080 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg5ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2081 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 1
2082 %2 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 5
2083 store i32 %2, ptr %outvl
2084 ret <vscale x 1 x i16> %1
2087 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg6ff.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr , i32)
2088 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg6ff.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i1>, i32, i32)
2090 define <vscale x 1 x i16> @test_vlseg6ff_nxv1i16(ptr %base, i32 %vl, ptr %outvl) {
2091 ; CHECK-LABEL: test_vlseg6ff_nxv1i16:
2092 ; CHECK: # %bb.0: # %entry
2093 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2094 ; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
2095 ; CHECK-NEXT: csrr a0, vl
2096 ; CHECK-NEXT: sw a0, 0(a2)
2099 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg6ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
2100 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 1
2101 %2 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 6
2102 store i32 %2, ptr %outvl
2103 ret <vscale x 1 x i16> %1
2106 define <vscale x 1 x i16> @test_vlseg6ff_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
2107 ; CHECK-LABEL: test_vlseg6ff_mask_nxv1i16:
2108 ; CHECK: # %bb.0: # %entry
2109 ; CHECK-NEXT: vmv1r.v v7, v8
2110 ; CHECK-NEXT: vmv1r.v v9, v8
2111 ; CHECK-NEXT: vmv1r.v v10, v8
2112 ; CHECK-NEXT: vmv1r.v v11, v8
2113 ; CHECK-NEXT: vmv1r.v v12, v8
2114 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
2115 ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
2116 ; CHECK-NEXT: csrr a0, vl
2117 ; CHECK-NEXT: sw a0, 0(a2)
2120 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg6ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2121 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 1
2122 %2 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 6
2123 store i32 %2, ptr %outvl
2124 ret <vscale x 1 x i16> %1
2127 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg7ff.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr , i32)
2128 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg7ff.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i1>, i32, i32)
2130 define <vscale x 1 x i16> @test_vlseg7ff_nxv1i16(ptr %base, i32 %vl, ptr %outvl) {
2131 ; CHECK-LABEL: test_vlseg7ff_nxv1i16:
2132 ; CHECK: # %bb.0: # %entry
2133 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2134 ; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
2135 ; CHECK-NEXT: csrr a0, vl
2136 ; CHECK-NEXT: sw a0, 0(a2)
2139 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg7ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
2140 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 1
2141 %2 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 7
2142 store i32 %2, ptr %outvl
2143 ret <vscale x 1 x i16> %1
2146 define <vscale x 1 x i16> @test_vlseg7ff_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
2147 ; CHECK-LABEL: test_vlseg7ff_mask_nxv1i16:
2148 ; CHECK: # %bb.0: # %entry
2149 ; CHECK-NEXT: vmv1r.v v7, v8
2150 ; CHECK-NEXT: vmv1r.v v9, v8
2151 ; CHECK-NEXT: vmv1r.v v10, v8
2152 ; CHECK-NEXT: vmv1r.v v11, v8
2153 ; CHECK-NEXT: vmv1r.v v12, v8
2154 ; CHECK-NEXT: vmv1r.v v13, v8
2155 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
2156 ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
2157 ; CHECK-NEXT: csrr a0, vl
2158 ; CHECK-NEXT: sw a0, 0(a2)
2161 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg7ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2162 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 1
2163 %2 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 7
2164 store i32 %2, ptr %outvl
2165 ret <vscale x 1 x i16> %1
2168 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg8ff.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr , i32)
2169 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg8ff.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, <vscale x 1 x i1>, i32, i32)
2171 define <vscale x 1 x i16> @test_vlseg8ff_nxv1i16(ptr %base, i32 %vl, ptr %outvl) {
2172 ; CHECK-LABEL: test_vlseg8ff_nxv1i16:
2173 ; CHECK: # %bb.0: # %entry
2174 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
2175 ; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
2176 ; CHECK-NEXT: csrr a0, vl
2177 ; CHECK-NEXT: sw a0, 0(a2)
2180 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg8ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
2181 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 1
2182 %2 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 8
2183 store i32 %2, ptr %outvl
2184 ret <vscale x 1 x i16> %1
2187 define <vscale x 1 x i16> @test_vlseg8ff_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
2188 ; CHECK-LABEL: test_vlseg8ff_mask_nxv1i16:
2189 ; CHECK: # %bb.0: # %entry
2190 ; CHECK-NEXT: vmv1r.v v7, v8
2191 ; CHECK-NEXT: vmv1r.v v9, v8
2192 ; CHECK-NEXT: vmv1r.v v10, v8
2193 ; CHECK-NEXT: vmv1r.v v11, v8
2194 ; CHECK-NEXT: vmv1r.v v12, v8
2195 ; CHECK-NEXT: vmv1r.v v13, v8
2196 ; CHECK-NEXT: vmv1r.v v14, v8
2197 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
2198 ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
2199 ; CHECK-NEXT: csrr a0, vl
2200 ; CHECK-NEXT: sw a0, 0(a2)
2203 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg8ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2204 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 1
2205 %2 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} %0, 8
2206 store i32 %2, ptr %outvl
2207 ret <vscale x 1 x i16> %1
2210 declare {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, ptr , i32)
2211 declare {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, ptr, <vscale x 32 x i1>, i32, i32)
2213 define <vscale x 32 x i8> @test_vlseg2ff_nxv32i8(ptr %base, i32 %vl, ptr %outvl) {
2214 ; CHECK-LABEL: test_vlseg2ff_nxv32i8:
2215 ; CHECK: # %bb.0: # %entry
2216 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
2217 ; CHECK-NEXT: vlseg2e8ff.v v4, (a0)
2218 ; CHECK-NEXT: csrr a0, vl
2219 ; CHECK-NEXT: sw a0, 0(a2)
2222 %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i32 %vl)
2223 %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} %0, 1
2224 %2 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} %0, 2
2225 store i32 %2, ptr %outvl
2226 ret <vscale x 32 x i8> %1
2229 define <vscale x 32 x i8> @test_vlseg2ff_mask_nxv32i8(<vscale x 32 x i8> %val, ptr %base, i32 %vl, <vscale x 32 x i1> %mask, ptr %outvl) {
2230 ; CHECK-LABEL: test_vlseg2ff_mask_nxv32i8:
2231 ; CHECK: # %bb.0: # %entry
2232 ; CHECK-NEXT: vmv4r.v v4, v8
2233 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
2234 ; CHECK-NEXT: vlseg2e8ff.v v4, (a0), v0.t
2235 ; CHECK-NEXT: csrr a0, vl
2236 ; CHECK-NEXT: sw a0, 0(a2)
2239 %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
2240 %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} %0, 1
2241 %2 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} %0, 2
2242 store i32 %2, ptr %outvl
2243 ret <vscale x 32 x i8> %1
2246 declare {<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg2ff.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr , i32)
2247 declare {<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i32, i32)
2249 define <vscale x 2 x i8> @test_vlseg2ff_nxv2i8(ptr %base, i32 %vl, ptr %outvl) {
2250 ; CHECK-LABEL: test_vlseg2ff_nxv2i8:
2251 ; CHECK: # %bb.0: # %entry
2252 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
2253 ; CHECK-NEXT: vlseg2e8ff.v v7, (a0)
2254 ; CHECK-NEXT: csrr a0, vl
2255 ; CHECK-NEXT: sw a0, 0(a2)
2258 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg2ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
2259 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 1
2260 %2 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 2
2261 store i32 %2, ptr %outvl
2262 ret <vscale x 2 x i8> %1
2265 define <vscale x 2 x i8> @test_vlseg2ff_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
2266 ; CHECK-LABEL: test_vlseg2ff_mask_nxv2i8:
2267 ; CHECK: # %bb.0: # %entry
2268 ; CHECK-NEXT: vmv1r.v v7, v8
2269 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
2270 ; CHECK-NEXT: vlseg2e8ff.v v7, (a0), v0.t
2271 ; CHECK-NEXT: csrr a0, vl
2272 ; CHECK-NEXT: sw a0, 0(a2)
2275 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2276 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 1
2277 %2 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 2
2278 store i32 %2, ptr %outvl
2279 ret <vscale x 2 x i8> %1
2282 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg3ff.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr , i32)
2283 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i32, i32)
2285 define <vscale x 2 x i8> @test_vlseg3ff_nxv2i8(ptr %base, i32 %vl, ptr %outvl) {
2286 ; CHECK-LABEL: test_vlseg3ff_nxv2i8:
2287 ; CHECK: # %bb.0: # %entry
2288 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
2289 ; CHECK-NEXT: vlseg3e8ff.v v7, (a0)
2290 ; CHECK-NEXT: csrr a0, vl
2291 ; CHECK-NEXT: sw a0, 0(a2)
2294 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg3ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
2295 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 1
2296 %2 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 3
2297 store i32 %2, ptr %outvl
2298 ret <vscale x 2 x i8> %1
2301 define <vscale x 2 x i8> @test_vlseg3ff_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
2302 ; CHECK-LABEL: test_vlseg3ff_mask_nxv2i8:
2303 ; CHECK: # %bb.0: # %entry
2304 ; CHECK-NEXT: vmv1r.v v7, v8
2305 ; CHECK-NEXT: vmv1r.v v9, v8
2306 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
2307 ; CHECK-NEXT: vlseg3e8ff.v v7, (a0), v0.t
2308 ; CHECK-NEXT: csrr a0, vl
2309 ; CHECK-NEXT: sw a0, 0(a2)
2312 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2313 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 1
2314 %2 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 3
2315 store i32 %2, ptr %outvl
2316 ret <vscale x 2 x i8> %1
2319 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg4ff.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr , i32)
2320 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i32, i32)
2322 define <vscale x 2 x i8> @test_vlseg4ff_nxv2i8(ptr %base, i32 %vl, ptr %outvl) {
2323 ; CHECK-LABEL: test_vlseg4ff_nxv2i8:
2324 ; CHECK: # %bb.0: # %entry
2325 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
2326 ; CHECK-NEXT: vlseg4e8ff.v v7, (a0)
2327 ; CHECK-NEXT: csrr a0, vl
2328 ; CHECK-NEXT: sw a0, 0(a2)
2331 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg4ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
2332 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 1
2333 %2 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 4
2334 store i32 %2, ptr %outvl
2335 ret <vscale x 2 x i8> %1
2338 define <vscale x 2 x i8> @test_vlseg4ff_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
2339 ; CHECK-LABEL: test_vlseg4ff_mask_nxv2i8:
2340 ; CHECK: # %bb.0: # %entry
2341 ; CHECK-NEXT: vmv1r.v v7, v8
2342 ; CHECK-NEXT: vmv1r.v v9, v8
2343 ; CHECK-NEXT: vmv1r.v v10, v8
2344 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
2345 ; CHECK-NEXT: vlseg4e8ff.v v7, (a0), v0.t
2346 ; CHECK-NEXT: csrr a0, vl
2347 ; CHECK-NEXT: sw a0, 0(a2)
2350 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2351 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 1
2352 %2 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 4
2353 store i32 %2, ptr %outvl
2354 ret <vscale x 2 x i8> %1
2357 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg5ff.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr , i32)
2358 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg5ff.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i32, i32)
2360 define <vscale x 2 x i8> @test_vlseg5ff_nxv2i8(ptr %base, i32 %vl, ptr %outvl) {
2361 ; CHECK-LABEL: test_vlseg5ff_nxv2i8:
2362 ; CHECK: # %bb.0: # %entry
2363 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
2364 ; CHECK-NEXT: vlseg5e8ff.v v7, (a0)
2365 ; CHECK-NEXT: csrr a0, vl
2366 ; CHECK-NEXT: sw a0, 0(a2)
2369 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg5ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
2370 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 1
2371 %2 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 5
2372 store i32 %2, ptr %outvl
2373 ret <vscale x 2 x i8> %1
2376 define <vscale x 2 x i8> @test_vlseg5ff_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
2377 ; CHECK-LABEL: test_vlseg5ff_mask_nxv2i8:
2378 ; CHECK: # %bb.0: # %entry
2379 ; CHECK-NEXT: vmv1r.v v7, v8
2380 ; CHECK-NEXT: vmv1r.v v9, v8
2381 ; CHECK-NEXT: vmv1r.v v10, v8
2382 ; CHECK-NEXT: vmv1r.v v11, v8
2383 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
2384 ; CHECK-NEXT: vlseg5e8ff.v v7, (a0), v0.t
2385 ; CHECK-NEXT: csrr a0, vl
2386 ; CHECK-NEXT: sw a0, 0(a2)
2389 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg5ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2390 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 1
2391 %2 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 5
2392 store i32 %2, ptr %outvl
2393 ret <vscale x 2 x i8> %1
2396 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg6ff.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr , i32)
2397 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg6ff.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i32, i32)
2399 define <vscale x 2 x i8> @test_vlseg6ff_nxv2i8(ptr %base, i32 %vl, ptr %outvl) {
2400 ; CHECK-LABEL: test_vlseg6ff_nxv2i8:
2401 ; CHECK: # %bb.0: # %entry
2402 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
2403 ; CHECK-NEXT: vlseg6e8ff.v v7, (a0)
2404 ; CHECK-NEXT: csrr a0, vl
2405 ; CHECK-NEXT: sw a0, 0(a2)
2408 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg6ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
2409 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 1
2410 %2 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 6
2411 store i32 %2, ptr %outvl
2412 ret <vscale x 2 x i8> %1
2415 define <vscale x 2 x i8> @test_vlseg6ff_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
2416 ; CHECK-LABEL: test_vlseg6ff_mask_nxv2i8:
2417 ; CHECK: # %bb.0: # %entry
2418 ; CHECK-NEXT: vmv1r.v v7, v8
2419 ; CHECK-NEXT: vmv1r.v v9, v8
2420 ; CHECK-NEXT: vmv1r.v v10, v8
2421 ; CHECK-NEXT: vmv1r.v v11, v8
2422 ; CHECK-NEXT: vmv1r.v v12, v8
2423 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
2424 ; CHECK-NEXT: vlseg6e8ff.v v7, (a0), v0.t
2425 ; CHECK-NEXT: csrr a0, vl
2426 ; CHECK-NEXT: sw a0, 0(a2)
2429 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg6ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2430 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 1
2431 %2 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 6
2432 store i32 %2, ptr %outvl
2433 ret <vscale x 2 x i8> %1
2436 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg7ff.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr , i32)
2437 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg7ff.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i32, i32)
2439 define <vscale x 2 x i8> @test_vlseg7ff_nxv2i8(ptr %base, i32 %vl, ptr %outvl) {
2440 ; CHECK-LABEL: test_vlseg7ff_nxv2i8:
2441 ; CHECK: # %bb.0: # %entry
2442 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
2443 ; CHECK-NEXT: vlseg7e8ff.v v7, (a0)
2444 ; CHECK-NEXT: csrr a0, vl
2445 ; CHECK-NEXT: sw a0, 0(a2)
2448 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg7ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
2449 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 1
2450 %2 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 7
2451 store i32 %2, ptr %outvl
2452 ret <vscale x 2 x i8> %1
2455 define <vscale x 2 x i8> @test_vlseg7ff_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
2456 ; CHECK-LABEL: test_vlseg7ff_mask_nxv2i8:
2457 ; CHECK: # %bb.0: # %entry
2458 ; CHECK-NEXT: vmv1r.v v7, v8
2459 ; CHECK-NEXT: vmv1r.v v9, v8
2460 ; CHECK-NEXT: vmv1r.v v10, v8
2461 ; CHECK-NEXT: vmv1r.v v11, v8
2462 ; CHECK-NEXT: vmv1r.v v12, v8
2463 ; CHECK-NEXT: vmv1r.v v13, v8
2464 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
2465 ; CHECK-NEXT: vlseg7e8ff.v v7, (a0), v0.t
2466 ; CHECK-NEXT: csrr a0, vl
2467 ; CHECK-NEXT: sw a0, 0(a2)
2470 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg7ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2471 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 1
2472 %2 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 7
2473 store i32 %2, ptr %outvl
2474 ret <vscale x 2 x i8> %1
2477 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg8ff.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr , i32)
2478 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg8ff.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i32, i32)
2480 define <vscale x 2 x i8> @test_vlseg8ff_nxv2i8(ptr %base, i32 %vl, ptr %outvl) {
2481 ; CHECK-LABEL: test_vlseg8ff_nxv2i8:
2482 ; CHECK: # %bb.0: # %entry
2483 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
2484 ; CHECK-NEXT: vlseg8e8ff.v v7, (a0)
2485 ; CHECK-NEXT: csrr a0, vl
2486 ; CHECK-NEXT: sw a0, 0(a2)
2489 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg8ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
2490 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 1
2491 %2 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 8
2492 store i32 %2, ptr %outvl
2493 ret <vscale x 2 x i8> %1
2496 define <vscale x 2 x i8> @test_vlseg8ff_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
2497 ; CHECK-LABEL: test_vlseg8ff_mask_nxv2i8:
2498 ; CHECK: # %bb.0: # %entry
2499 ; CHECK-NEXT: vmv1r.v v7, v8
2500 ; CHECK-NEXT: vmv1r.v v9, v8
2501 ; CHECK-NEXT: vmv1r.v v10, v8
2502 ; CHECK-NEXT: vmv1r.v v11, v8
2503 ; CHECK-NEXT: vmv1r.v v12, v8
2504 ; CHECK-NEXT: vmv1r.v v13, v8
2505 ; CHECK-NEXT: vmv1r.v v14, v8
2506 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
2507 ; CHECK-NEXT: vlseg8e8ff.v v7, (a0), v0.t
2508 ; CHECK-NEXT: csrr a0, vl
2509 ; CHECK-NEXT: sw a0, 0(a2)
2512 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg8ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2513 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 1
2514 %2 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} %0, 8
2515 store i32 %2, ptr %outvl
2516 ret <vscale x 2 x i8> %1
2519 declare {<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg2ff.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr , i32)
2520 declare {<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i1>, i32, i32)
2522 define <vscale x 2 x i16> @test_vlseg2ff_nxv2i16(ptr %base, i32 %vl, ptr %outvl) {
2523 ; CHECK-LABEL: test_vlseg2ff_nxv2i16:
2524 ; CHECK: # %bb.0: # %entry
2525 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2526 ; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
2527 ; CHECK-NEXT: csrr a0, vl
2528 ; CHECK-NEXT: sw a0, 0(a2)
2531 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg2ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
2532 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
2533 %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 2
2534 store i32 %2, ptr %outvl
2535 ret <vscale x 2 x i16> %1
2538 define <vscale x 2 x i16> @test_vlseg2ff_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
2539 ; CHECK-LABEL: test_vlseg2ff_mask_nxv2i16:
2540 ; CHECK: # %bb.0: # %entry
2541 ; CHECK-NEXT: vmv1r.v v7, v8
2542 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
2543 ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
2544 ; CHECK-NEXT: csrr a0, vl
2545 ; CHECK-NEXT: sw a0, 0(a2)
2548 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2549 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
2550 %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 2
2551 store i32 %2, ptr %outvl
2552 ret <vscale x 2 x i16> %1
2555 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg3ff.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr , i32)
2556 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg3ff.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i1>, i32, i32)
2558 define <vscale x 2 x i16> @test_vlseg3ff_nxv2i16(ptr %base, i32 %vl, ptr %outvl) {
2559 ; CHECK-LABEL: test_vlseg3ff_nxv2i16:
2560 ; CHECK: # %bb.0: # %entry
2561 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2562 ; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
2563 ; CHECK-NEXT: csrr a0, vl
2564 ; CHECK-NEXT: sw a0, 0(a2)
2567 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg3ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
2568 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
2569 %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 3
2570 store i32 %2, ptr %outvl
2571 ret <vscale x 2 x i16> %1
2574 define <vscale x 2 x i16> @test_vlseg3ff_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
2575 ; CHECK-LABEL: test_vlseg3ff_mask_nxv2i16:
2576 ; CHECK: # %bb.0: # %entry
2577 ; CHECK-NEXT: vmv1r.v v7, v8
2578 ; CHECK-NEXT: vmv1r.v v9, v8
2579 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
2580 ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
2581 ; CHECK-NEXT: csrr a0, vl
2582 ; CHECK-NEXT: sw a0, 0(a2)
2585 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg3ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2586 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
2587 %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 3
2588 store i32 %2, ptr %outvl
2589 ret <vscale x 2 x i16> %1
2592 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg4ff.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr , i32)
2593 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg4ff.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i1>, i32, i32)
2595 define <vscale x 2 x i16> @test_vlseg4ff_nxv2i16(ptr %base, i32 %vl, ptr %outvl) {
2596 ; CHECK-LABEL: test_vlseg4ff_nxv2i16:
2597 ; CHECK: # %bb.0: # %entry
2598 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2599 ; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
2600 ; CHECK-NEXT: csrr a0, vl
2601 ; CHECK-NEXT: sw a0, 0(a2)
2604 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg4ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
2605 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
2606 %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 4
2607 store i32 %2, ptr %outvl
2608 ret <vscale x 2 x i16> %1
2611 define <vscale x 2 x i16> @test_vlseg4ff_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
2612 ; CHECK-LABEL: test_vlseg4ff_mask_nxv2i16:
2613 ; CHECK: # %bb.0: # %entry
2614 ; CHECK-NEXT: vmv1r.v v7, v8
2615 ; CHECK-NEXT: vmv1r.v v9, v8
2616 ; CHECK-NEXT: vmv1r.v v10, v8
2617 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
2618 ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
2619 ; CHECK-NEXT: csrr a0, vl
2620 ; CHECK-NEXT: sw a0, 0(a2)
2623 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg4ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2624 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
2625 %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 4
2626 store i32 %2, ptr %outvl
2627 ret <vscale x 2 x i16> %1
2630 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg5ff.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr , i32)
2631 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg5ff.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i1>, i32, i32)
2633 define <vscale x 2 x i16> @test_vlseg5ff_nxv2i16(ptr %base, i32 %vl, ptr %outvl) {
2634 ; CHECK-LABEL: test_vlseg5ff_nxv2i16:
2635 ; CHECK: # %bb.0: # %entry
2636 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2637 ; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
2638 ; CHECK-NEXT: csrr a0, vl
2639 ; CHECK-NEXT: sw a0, 0(a2)
2642 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg5ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
2643 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
2644 %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 5
2645 store i32 %2, ptr %outvl
2646 ret <vscale x 2 x i16> %1
2649 define <vscale x 2 x i16> @test_vlseg5ff_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
2650 ; CHECK-LABEL: test_vlseg5ff_mask_nxv2i16:
2651 ; CHECK: # %bb.0: # %entry
2652 ; CHECK-NEXT: vmv1r.v v7, v8
2653 ; CHECK-NEXT: vmv1r.v v9, v8
2654 ; CHECK-NEXT: vmv1r.v v10, v8
2655 ; CHECK-NEXT: vmv1r.v v11, v8
2656 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
2657 ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
2658 ; CHECK-NEXT: csrr a0, vl
2659 ; CHECK-NEXT: sw a0, 0(a2)
2662 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg5ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2663 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
2664 %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 5
2665 store i32 %2, ptr %outvl
2666 ret <vscale x 2 x i16> %1
2669 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg6ff.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr , i32)
2670 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg6ff.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i1>, i32, i32)
2672 define <vscale x 2 x i16> @test_vlseg6ff_nxv2i16(ptr %base, i32 %vl, ptr %outvl) {
2673 ; CHECK-LABEL: test_vlseg6ff_nxv2i16:
2674 ; CHECK: # %bb.0: # %entry
2675 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2676 ; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
2677 ; CHECK-NEXT: csrr a0, vl
2678 ; CHECK-NEXT: sw a0, 0(a2)
2681 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg6ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
2682 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
2683 %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 6
2684 store i32 %2, ptr %outvl
2685 ret <vscale x 2 x i16> %1
2688 define <vscale x 2 x i16> @test_vlseg6ff_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
2689 ; CHECK-LABEL: test_vlseg6ff_mask_nxv2i16:
2690 ; CHECK: # %bb.0: # %entry
2691 ; CHECK-NEXT: vmv1r.v v7, v8
2692 ; CHECK-NEXT: vmv1r.v v9, v8
2693 ; CHECK-NEXT: vmv1r.v v10, v8
2694 ; CHECK-NEXT: vmv1r.v v11, v8
2695 ; CHECK-NEXT: vmv1r.v v12, v8
2696 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
2697 ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
2698 ; CHECK-NEXT: csrr a0, vl
2699 ; CHECK-NEXT: sw a0, 0(a2)
2702 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg6ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2703 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
2704 %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 6
2705 store i32 %2, ptr %outvl
2706 ret <vscale x 2 x i16> %1
2709 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg7ff.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr , i32)
2710 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg7ff.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i1>, i32, i32)
2712 define <vscale x 2 x i16> @test_vlseg7ff_nxv2i16(ptr %base, i32 %vl, ptr %outvl) {
2713 ; CHECK-LABEL: test_vlseg7ff_nxv2i16:
2714 ; CHECK: # %bb.0: # %entry
2715 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2716 ; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
2717 ; CHECK-NEXT: csrr a0, vl
2718 ; CHECK-NEXT: sw a0, 0(a2)
2721 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg7ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
2722 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
2723 %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 7
2724 store i32 %2, ptr %outvl
2725 ret <vscale x 2 x i16> %1
2728 define <vscale x 2 x i16> @test_vlseg7ff_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
2729 ; CHECK-LABEL: test_vlseg7ff_mask_nxv2i16:
2730 ; CHECK: # %bb.0: # %entry
2731 ; CHECK-NEXT: vmv1r.v v7, v8
2732 ; CHECK-NEXT: vmv1r.v v9, v8
2733 ; CHECK-NEXT: vmv1r.v v10, v8
2734 ; CHECK-NEXT: vmv1r.v v11, v8
2735 ; CHECK-NEXT: vmv1r.v v12, v8
2736 ; CHECK-NEXT: vmv1r.v v13, v8
2737 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
2738 ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
2739 ; CHECK-NEXT: csrr a0, vl
2740 ; CHECK-NEXT: sw a0, 0(a2)
2743 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg7ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2744 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
2745 %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 7
2746 store i32 %2, ptr %outvl
2747 ret <vscale x 2 x i16> %1
2750 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg8ff.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr , i32)
2751 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg8ff.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, <vscale x 2 x i1>, i32, i32)
2753 define <vscale x 2 x i16> @test_vlseg8ff_nxv2i16(ptr %base, i32 %vl, ptr %outvl) {
2754 ; CHECK-LABEL: test_vlseg8ff_nxv2i16:
2755 ; CHECK: # %bb.0: # %entry
2756 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
2757 ; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
2758 ; CHECK-NEXT: csrr a0, vl
2759 ; CHECK-NEXT: sw a0, 0(a2)
2762 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg8ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
2763 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
2764 %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 8
2765 store i32 %2, ptr %outvl
2766 ret <vscale x 2 x i16> %1
2769 define <vscale x 2 x i16> @test_vlseg8ff_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
2770 ; CHECK-LABEL: test_vlseg8ff_mask_nxv2i16:
2771 ; CHECK: # %bb.0: # %entry
2772 ; CHECK-NEXT: vmv1r.v v7, v8
2773 ; CHECK-NEXT: vmv1r.v v9, v8
2774 ; CHECK-NEXT: vmv1r.v v10, v8
2775 ; CHECK-NEXT: vmv1r.v v11, v8
2776 ; CHECK-NEXT: vmv1r.v v12, v8
2777 ; CHECK-NEXT: vmv1r.v v13, v8
2778 ; CHECK-NEXT: vmv1r.v v14, v8
2779 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
2780 ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
2781 ; CHECK-NEXT: csrr a0, vl
2782 ; CHECK-NEXT: sw a0, 0(a2)
2785 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg8ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2786 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 1
2787 %2 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} %0, 8
2788 store i32 %2, ptr %outvl
2789 ret <vscale x 2 x i16> %1
2792 declare {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr , i32)
2793 declare {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i1>, i32, i32)
2795 define <vscale x 4 x i32> @test_vlseg2ff_nxv4i32(ptr %base, i32 %vl, ptr %outvl) {
2796 ; CHECK-LABEL: test_vlseg2ff_nxv4i32:
2797 ; CHECK: # %bb.0: # %entry
2798 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
2799 ; CHECK-NEXT: vlseg2e32ff.v v6, (a0)
2800 ; CHECK-NEXT: csrr a0, vl
2801 ; CHECK-NEXT: sw a0, 0(a2)
2804 %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %vl)
2805 %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %0, 1
2806 %2 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %0, 2
2807 store i32 %2, ptr %outvl
2808 ret <vscale x 4 x i32> %1
2811 define <vscale x 4 x i32> @test_vlseg2ff_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
2812 ; CHECK-LABEL: test_vlseg2ff_mask_nxv4i32:
2813 ; CHECK: # %bb.0: # %entry
2814 ; CHECK-NEXT: vmv2r.v v6, v8
2815 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
2816 ; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t
2817 ; CHECK-NEXT: csrr a0, vl
2818 ; CHECK-NEXT: sw a0, 0(a2)
2821 %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
2822 %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %0, 1
2823 %2 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %0, 2
2824 store i32 %2, ptr %outvl
2825 ret <vscale x 4 x i32> %1
2828 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg3ff.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr , i32)
2829 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg3ff.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i1>, i32, i32)
2831 define <vscale x 4 x i32> @test_vlseg3ff_nxv4i32(ptr %base, i32 %vl, ptr %outvl) {
2832 ; CHECK-LABEL: test_vlseg3ff_nxv4i32:
2833 ; CHECK: # %bb.0: # %entry
2834 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
2835 ; CHECK-NEXT: vlseg3e32ff.v v6, (a0)
2836 ; CHECK-NEXT: csrr a0, vl
2837 ; CHECK-NEXT: sw a0, 0(a2)
2840 %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg3ff.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %vl)
2841 %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %0, 1
2842 %2 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %0, 3
2843 store i32 %2, ptr %outvl
2844 ret <vscale x 4 x i32> %1
2847 define <vscale x 4 x i32> @test_vlseg3ff_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
2848 ; CHECK-LABEL: test_vlseg3ff_mask_nxv4i32:
2849 ; CHECK: # %bb.0: # %entry
2850 ; CHECK-NEXT: vmv2r.v v6, v8
2851 ; CHECK-NEXT: vmv2r.v v10, v8
2852 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
2853 ; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t
2854 ; CHECK-NEXT: csrr a0, vl
2855 ; CHECK-NEXT: sw a0, 0(a2)
2858 %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg3ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
2859 %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %0, 1
2860 %2 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %0, 3
2861 store i32 %2, ptr %outvl
2862 ret <vscale x 4 x i32> %1
2865 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg4ff.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr , i32)
2866 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg4ff.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, <vscale x 4 x i1>, i32, i32)
2868 define <vscale x 4 x i32> @test_vlseg4ff_nxv4i32(ptr %base, i32 %vl, ptr %outvl) {
2869 ; CHECK-LABEL: test_vlseg4ff_nxv4i32:
2870 ; CHECK: # %bb.0: # %entry
2871 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
2872 ; CHECK-NEXT: vlseg4e32ff.v v6, (a0)
2873 ; CHECK-NEXT: csrr a0, vl
2874 ; CHECK-NEXT: sw a0, 0(a2)
2877 %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg4ff.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %vl)
2878 %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %0, 1
2879 %2 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %0, 4
2880 store i32 %2, ptr %outvl
2881 ret <vscale x 4 x i32> %1
2884 define <vscale x 4 x i32> @test_vlseg4ff_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
2885 ; CHECK-LABEL: test_vlseg4ff_mask_nxv4i32:
2886 ; CHECK: # %bb.0: # %entry
2887 ; CHECK-NEXT: vmv2r.v v6, v8
2888 ; CHECK-NEXT: vmv2r.v v10, v8
2889 ; CHECK-NEXT: vmv2r.v v12, v8
2890 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
2891 ; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t
2892 ; CHECK-NEXT: csrr a0, vl
2893 ; CHECK-NEXT: sw a0, 0(a2)
2896 %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg4ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
2897 %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %0, 1
2898 %2 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} %0, 4
2899 store i32 %2, ptr %outvl
2900 ret <vscale x 4 x i32> %1
2903 declare {<vscale x 16 x half>,<vscale x 16 x half>, i32} @llvm.riscv.vlseg2ff.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, ptr , i32)
2904 declare {<vscale x 16 x half>,<vscale x 16 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, ptr, <vscale x 16 x i1>, i32, i32)
2906 define <vscale x 16 x half> @test_vlseg2ff_nxv16f16(ptr %base, i32 %vl, ptr %outvl) {
2907 ; CHECK-LABEL: test_vlseg2ff_nxv16f16:
2908 ; CHECK: # %bb.0: # %entry
2909 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
2910 ; CHECK-NEXT: vlseg2e16ff.v v4, (a0)
2911 ; CHECK-NEXT: csrr a0, vl
2912 ; CHECK-NEXT: sw a0, 0(a2)
2915 %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>, i32} @llvm.riscv.vlseg2ff.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i32 %vl)
2916 %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>, i32} %0, 1
2917 %2 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>, i32} %0, 2
2918 store i32 %2, ptr %outvl
2919 ret <vscale x 16 x half> %1
2922 define <vscale x 16 x half> @test_vlseg2ff_mask_nxv16f16(<vscale x 16 x half> %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
2923 ; CHECK-LABEL: test_vlseg2ff_mask_nxv16f16:
2924 ; CHECK: # %bb.0: # %entry
2925 ; CHECK-NEXT: vmv4r.v v4, v8
2926 ; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
2927 ; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t
2928 ; CHECK-NEXT: csrr a0, vl
2929 ; CHECK-NEXT: sw a0, 0(a2)
2932 %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv16f16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
2933 %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>, i32} %0, 1
2934 %2 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>, i32} %0, 2
2935 store i32 %2, ptr %outvl
2936 ret <vscale x 16 x half> %1
2939 declare {<vscale x 4 x double>,<vscale x 4 x double>, i32} @llvm.riscv.vlseg2ff.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, ptr , i32)
2940 declare {<vscale x 4 x double>,<vscale x 4 x double>, i32} @llvm.riscv.vlseg2ff.mask.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, ptr, <vscale x 4 x i1>, i32, i32)
2942 define <vscale x 4 x double> @test_vlseg2ff_nxv4f64(ptr %base, i32 %vl, ptr %outvl) {
2943 ; CHECK-LABEL: test_vlseg2ff_nxv4f64:
2944 ; CHECK: # %bb.0: # %entry
2945 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
2946 ; CHECK-NEXT: vlseg2e64ff.v v4, (a0)
2947 ; CHECK-NEXT: csrr a0, vl
2948 ; CHECK-NEXT: sw a0, 0(a2)
2951 %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>, i32} @llvm.riscv.vlseg2ff.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i32 %vl)
2952 %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>, i32} %0, 1
2953 %2 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>, i32} %0, 2
2954 store i32 %2, ptr %outvl
2955 ret <vscale x 4 x double> %1
2958 define <vscale x 4 x double> @test_vlseg2ff_mask_nxv4f64(<vscale x 4 x double> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
2959 ; CHECK-LABEL: test_vlseg2ff_mask_nxv4f64:
2960 ; CHECK: # %bb.0: # %entry
2961 ; CHECK-NEXT: vmv4r.v v4, v8
2962 ; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
2963 ; CHECK-NEXT: vlseg2e64ff.v v4, (a0), v0.t
2964 ; CHECK-NEXT: csrr a0, vl
2965 ; CHECK-NEXT: sw a0, 0(a2)
2968 %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>, i32} @llvm.riscv.vlseg2ff.mask.nxv4f64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
2969 %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>, i32} %0, 1
2970 %2 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>, i32} %0, 2
2971 store i32 %2, ptr %outvl
2972 ret <vscale x 4 x double> %1
2975 declare {<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg2ff.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>, ptr , i32)
2976 declare {<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg2ff.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i1>, i32, i32)
2978 define <vscale x 1 x double> @test_vlseg2ff_nxv1f64(ptr %base, i32 %vl, ptr %outvl) {
2979 ; CHECK-LABEL: test_vlseg2ff_nxv1f64:
2980 ; CHECK: # %bb.0: # %entry
2981 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
2982 ; CHECK-NEXT: vlseg2e64ff.v v7, (a0)
2983 ; CHECK-NEXT: csrr a0, vl
2984 ; CHECK-NEXT: sw a0, 0(a2)
2987 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg2ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
2988 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 1
2989 %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 2
2990 store i32 %2, ptr %outvl
2991 ret <vscale x 1 x double> %1
2994 define <vscale x 1 x double> @test_vlseg2ff_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
2995 ; CHECK-LABEL: test_vlseg2ff_mask_nxv1f64:
2996 ; CHECK: # %bb.0: # %entry
2997 ; CHECK-NEXT: vmv1r.v v7, v8
2998 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
2999 ; CHECK-NEXT: vlseg2e64ff.v v7, (a0), v0.t
3000 ; CHECK-NEXT: csrr a0, vl
3001 ; CHECK-NEXT: sw a0, 0(a2)
3004 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg2ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3005 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 1
3006 %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 2
3007 store i32 %2, ptr %outvl
3008 ret <vscale x 1 x double> %1
3011 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg3ff.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr , i32)
3012 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg3ff.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i1>, i32, i32)
3014 define <vscale x 1 x double> @test_vlseg3ff_nxv1f64(ptr %base, i32 %vl, ptr %outvl) {
3015 ; CHECK-LABEL: test_vlseg3ff_nxv1f64:
3016 ; CHECK: # %bb.0: # %entry
3017 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
3018 ; CHECK-NEXT: vlseg3e64ff.v v7, (a0)
3019 ; CHECK-NEXT: csrr a0, vl
3020 ; CHECK-NEXT: sw a0, 0(a2)
3023 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg3ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
3024 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 1
3025 %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 3
3026 store i32 %2, ptr %outvl
3027 ret <vscale x 1 x double> %1
3030 define <vscale x 1 x double> @test_vlseg3ff_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
3031 ; CHECK-LABEL: test_vlseg3ff_mask_nxv1f64:
3032 ; CHECK: # %bb.0: # %entry
3033 ; CHECK-NEXT: vmv1r.v v7, v8
3034 ; CHECK-NEXT: vmv1r.v v9, v8
3035 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
3036 ; CHECK-NEXT: vlseg3e64ff.v v7, (a0), v0.t
3037 ; CHECK-NEXT: csrr a0, vl
3038 ; CHECK-NEXT: sw a0, 0(a2)
3041 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg3ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3042 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 1
3043 %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 3
3044 store i32 %2, ptr %outvl
3045 ret <vscale x 1 x double> %1
3048 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg4ff.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr , i32)
3049 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg4ff.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i1>, i32, i32)
3051 define <vscale x 1 x double> @test_vlseg4ff_nxv1f64(ptr %base, i32 %vl, ptr %outvl) {
3052 ; CHECK-LABEL: test_vlseg4ff_nxv1f64:
3053 ; CHECK: # %bb.0: # %entry
3054 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
3055 ; CHECK-NEXT: vlseg4e64ff.v v7, (a0)
3056 ; CHECK-NEXT: csrr a0, vl
3057 ; CHECK-NEXT: sw a0, 0(a2)
3060 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg4ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
3061 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 1
3062 %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 4
3063 store i32 %2, ptr %outvl
3064 ret <vscale x 1 x double> %1
3067 define <vscale x 1 x double> @test_vlseg4ff_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
3068 ; CHECK-LABEL: test_vlseg4ff_mask_nxv1f64:
3069 ; CHECK: # %bb.0: # %entry
3070 ; CHECK-NEXT: vmv1r.v v7, v8
3071 ; CHECK-NEXT: vmv1r.v v9, v8
3072 ; CHECK-NEXT: vmv1r.v v10, v8
3073 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
3074 ; CHECK-NEXT: vlseg4e64ff.v v7, (a0), v0.t
3075 ; CHECK-NEXT: csrr a0, vl
3076 ; CHECK-NEXT: sw a0, 0(a2)
3079 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg4ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3080 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 1
3081 %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 4
3082 store i32 %2, ptr %outvl
3083 ret <vscale x 1 x double> %1
3086 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg5ff.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr , i32)
3087 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg5ff.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i1>, i32, i32)
3089 define <vscale x 1 x double> @test_vlseg5ff_nxv1f64(ptr %base, i32 %vl, ptr %outvl) {
3090 ; CHECK-LABEL: test_vlseg5ff_nxv1f64:
3091 ; CHECK: # %bb.0: # %entry
3092 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
3093 ; CHECK-NEXT: vlseg5e64ff.v v7, (a0)
3094 ; CHECK-NEXT: csrr a0, vl
3095 ; CHECK-NEXT: sw a0, 0(a2)
3098 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg5ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
3099 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 1
3100 %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 5
3101 store i32 %2, ptr %outvl
3102 ret <vscale x 1 x double> %1
3105 define <vscale x 1 x double> @test_vlseg5ff_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
3106 ; CHECK-LABEL: test_vlseg5ff_mask_nxv1f64:
3107 ; CHECK: # %bb.0: # %entry
3108 ; CHECK-NEXT: vmv1r.v v7, v8
3109 ; CHECK-NEXT: vmv1r.v v9, v8
3110 ; CHECK-NEXT: vmv1r.v v10, v8
3111 ; CHECK-NEXT: vmv1r.v v11, v8
3112 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
3113 ; CHECK-NEXT: vlseg5e64ff.v v7, (a0), v0.t
3114 ; CHECK-NEXT: csrr a0, vl
3115 ; CHECK-NEXT: sw a0, 0(a2)
3118 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg5ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3119 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 1
3120 %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 5
3121 store i32 %2, ptr %outvl
3122 ret <vscale x 1 x double> %1
3125 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg6ff.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr , i32)
3126 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg6ff.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i1>, i32, i32)
3128 define <vscale x 1 x double> @test_vlseg6ff_nxv1f64(ptr %base, i32 %vl, ptr %outvl) {
3129 ; CHECK-LABEL: test_vlseg6ff_nxv1f64:
3130 ; CHECK: # %bb.0: # %entry
3131 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
3132 ; CHECK-NEXT: vlseg6e64ff.v v7, (a0)
3133 ; CHECK-NEXT: csrr a0, vl
3134 ; CHECK-NEXT: sw a0, 0(a2)
3137 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg6ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
3138 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 1
3139 %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 6
3140 store i32 %2, ptr %outvl
3141 ret <vscale x 1 x double> %1
3144 define <vscale x 1 x double> @test_vlseg6ff_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
3145 ; CHECK-LABEL: test_vlseg6ff_mask_nxv1f64:
3146 ; CHECK: # %bb.0: # %entry
3147 ; CHECK-NEXT: vmv1r.v v7, v8
3148 ; CHECK-NEXT: vmv1r.v v9, v8
3149 ; CHECK-NEXT: vmv1r.v v10, v8
3150 ; CHECK-NEXT: vmv1r.v v11, v8
3151 ; CHECK-NEXT: vmv1r.v v12, v8
3152 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
3153 ; CHECK-NEXT: vlseg6e64ff.v v7, (a0), v0.t
3154 ; CHECK-NEXT: csrr a0, vl
3155 ; CHECK-NEXT: sw a0, 0(a2)
3158 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg6ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3159 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 1
3160 %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 6
3161 store i32 %2, ptr %outvl
3162 ret <vscale x 1 x double> %1
3165 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg7ff.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr , i32)
3166 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg7ff.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i1>, i32, i32)
3168 define <vscale x 1 x double> @test_vlseg7ff_nxv1f64(ptr %base, i32 %vl, ptr %outvl) {
3169 ; CHECK-LABEL: test_vlseg7ff_nxv1f64:
3170 ; CHECK: # %bb.0: # %entry
3171 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
3172 ; CHECK-NEXT: vlseg7e64ff.v v7, (a0)
3173 ; CHECK-NEXT: csrr a0, vl
3174 ; CHECK-NEXT: sw a0, 0(a2)
3177 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg7ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
3178 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 1
3179 %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 7
3180 store i32 %2, ptr %outvl
3181 ret <vscale x 1 x double> %1
3184 define <vscale x 1 x double> @test_vlseg7ff_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
3185 ; CHECK-LABEL: test_vlseg7ff_mask_nxv1f64:
3186 ; CHECK: # %bb.0: # %entry
3187 ; CHECK-NEXT: vmv1r.v v7, v8
3188 ; CHECK-NEXT: vmv1r.v v9, v8
3189 ; CHECK-NEXT: vmv1r.v v10, v8
3190 ; CHECK-NEXT: vmv1r.v v11, v8
3191 ; CHECK-NEXT: vmv1r.v v12, v8
3192 ; CHECK-NEXT: vmv1r.v v13, v8
3193 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
3194 ; CHECK-NEXT: vlseg7e64ff.v v7, (a0), v0.t
3195 ; CHECK-NEXT: csrr a0, vl
3196 ; CHECK-NEXT: sw a0, 0(a2)
3199 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg7ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3200 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 1
3201 %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 7
3202 store i32 %2, ptr %outvl
3203 ret <vscale x 1 x double> %1
3206 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg8ff.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr , i32)
3207 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg8ff.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, <vscale x 1 x i1>, i32, i32)
3209 define <vscale x 1 x double> @test_vlseg8ff_nxv1f64(ptr %base, i32 %vl, ptr %outvl) {
3210 ; CHECK-LABEL: test_vlseg8ff_nxv1f64:
3211 ; CHECK: # %bb.0: # %entry
3212 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
3213 ; CHECK-NEXT: vlseg8e64ff.v v7, (a0)
3214 ; CHECK-NEXT: csrr a0, vl
3215 ; CHECK-NEXT: sw a0, 0(a2)
3218 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg8ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
3219 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 1
3220 %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 8
3221 store i32 %2, ptr %outvl
3222 ret <vscale x 1 x double> %1
3225 define <vscale x 1 x double> @test_vlseg8ff_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
3226 ; CHECK-LABEL: test_vlseg8ff_mask_nxv1f64:
3227 ; CHECK: # %bb.0: # %entry
3228 ; CHECK-NEXT: vmv1r.v v7, v8
3229 ; CHECK-NEXT: vmv1r.v v9, v8
3230 ; CHECK-NEXT: vmv1r.v v10, v8
3231 ; CHECK-NEXT: vmv1r.v v11, v8
3232 ; CHECK-NEXT: vmv1r.v v12, v8
3233 ; CHECK-NEXT: vmv1r.v v13, v8
3234 ; CHECK-NEXT: vmv1r.v v14, v8
3235 ; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
3236 ; CHECK-NEXT: vlseg8e64ff.v v7, (a0), v0.t
3237 ; CHECK-NEXT: csrr a0, vl
3238 ; CHECK-NEXT: sw a0, 0(a2)
3241 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg8ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3242 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 1
3243 %2 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} %0, 8
3244 store i32 %2, ptr %outvl
3245 ret <vscale x 1 x double> %1
3248 declare {<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg2ff.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>, ptr , i32)
3249 declare {<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg2ff.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i1>, i32, i32)
3251 define <vscale x 2 x float> @test_vlseg2ff_nxv2f32(ptr %base, i32 %vl, ptr %outvl) {
3252 ; CHECK-LABEL: test_vlseg2ff_nxv2f32:
3253 ; CHECK: # %bb.0: # %entry
3254 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
3255 ; CHECK-NEXT: vlseg2e32ff.v v7, (a0)
3256 ; CHECK-NEXT: csrr a0, vl
3257 ; CHECK-NEXT: sw a0, 0(a2)
3260 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg2ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
3261 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 1
3262 %2 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 2
3263 store i32 %2, ptr %outvl
3264 ret <vscale x 2 x float> %1
3267 define <vscale x 2 x float> @test_vlseg2ff_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
3268 ; CHECK-LABEL: test_vlseg2ff_mask_nxv2f32:
3269 ; CHECK: # %bb.0: # %entry
3270 ; CHECK-NEXT: vmv1r.v v7, v8
3271 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
3272 ; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t
3273 ; CHECK-NEXT: csrr a0, vl
3274 ; CHECK-NEXT: sw a0, 0(a2)
3277 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg2ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
3278 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 1
3279 %2 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 2
3280 store i32 %2, ptr %outvl
3281 ret <vscale x 2 x float> %1
3284 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg3ff.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr , i32)
3285 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg3ff.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i1>, i32, i32)
3287 define <vscale x 2 x float> @test_vlseg3ff_nxv2f32(ptr %base, i32 %vl, ptr %outvl) {
3288 ; CHECK-LABEL: test_vlseg3ff_nxv2f32:
3289 ; CHECK: # %bb.0: # %entry
3290 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
3291 ; CHECK-NEXT: vlseg3e32ff.v v7, (a0)
3292 ; CHECK-NEXT: csrr a0, vl
3293 ; CHECK-NEXT: sw a0, 0(a2)
3296 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg3ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
3297 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 1
3298 %2 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 3
3299 store i32 %2, ptr %outvl
3300 ret <vscale x 2 x float> %1
3303 define <vscale x 2 x float> @test_vlseg3ff_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
3304 ; CHECK-LABEL: test_vlseg3ff_mask_nxv2f32:
3305 ; CHECK: # %bb.0: # %entry
3306 ; CHECK-NEXT: vmv1r.v v7, v8
3307 ; CHECK-NEXT: vmv1r.v v9, v8
3308 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
3309 ; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t
3310 ; CHECK-NEXT: csrr a0, vl
3311 ; CHECK-NEXT: sw a0, 0(a2)
3314 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg3ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
3315 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 1
3316 %2 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 3
3317 store i32 %2, ptr %outvl
3318 ret <vscale x 2 x float> %1
3321 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg4ff.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr , i32)
3322 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg4ff.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i1>, i32, i32)
3324 define <vscale x 2 x float> @test_vlseg4ff_nxv2f32(ptr %base, i32 %vl, ptr %outvl) {
3325 ; CHECK-LABEL: test_vlseg4ff_nxv2f32:
3326 ; CHECK: # %bb.0: # %entry
3327 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
3328 ; CHECK-NEXT: vlseg4e32ff.v v7, (a0)
3329 ; CHECK-NEXT: csrr a0, vl
3330 ; CHECK-NEXT: sw a0, 0(a2)
3333 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg4ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
3334 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 1
3335 %2 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 4
3336 store i32 %2, ptr %outvl
3337 ret <vscale x 2 x float> %1
3340 define <vscale x 2 x float> @test_vlseg4ff_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
3341 ; CHECK-LABEL: test_vlseg4ff_mask_nxv2f32:
3342 ; CHECK: # %bb.0: # %entry
3343 ; CHECK-NEXT: vmv1r.v v7, v8
3344 ; CHECK-NEXT: vmv1r.v v9, v8
3345 ; CHECK-NEXT: vmv1r.v v10, v8
3346 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
3347 ; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t
3348 ; CHECK-NEXT: csrr a0, vl
3349 ; CHECK-NEXT: sw a0, 0(a2)
3352 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg4ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
3353 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 1
3354 %2 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 4
3355 store i32 %2, ptr %outvl
3356 ret <vscale x 2 x float> %1
3359 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg5ff.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr , i32)
3360 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg5ff.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i1>, i32, i32)
3362 define <vscale x 2 x float> @test_vlseg5ff_nxv2f32(ptr %base, i32 %vl, ptr %outvl) {
3363 ; CHECK-LABEL: test_vlseg5ff_nxv2f32:
3364 ; CHECK: # %bb.0: # %entry
3365 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
3366 ; CHECK-NEXT: vlseg5e32ff.v v7, (a0)
3367 ; CHECK-NEXT: csrr a0, vl
3368 ; CHECK-NEXT: sw a0, 0(a2)
3371 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg5ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
3372 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 1
3373 %2 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 5
3374 store i32 %2, ptr %outvl
3375 ret <vscale x 2 x float> %1
3378 define <vscale x 2 x float> @test_vlseg5ff_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
3379 ; CHECK-LABEL: test_vlseg5ff_mask_nxv2f32:
3380 ; CHECK: # %bb.0: # %entry
3381 ; CHECK-NEXT: vmv1r.v v7, v8
3382 ; CHECK-NEXT: vmv1r.v v9, v8
3383 ; CHECK-NEXT: vmv1r.v v10, v8
3384 ; CHECK-NEXT: vmv1r.v v11, v8
3385 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
3386 ; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t
3387 ; CHECK-NEXT: csrr a0, vl
3388 ; CHECK-NEXT: sw a0, 0(a2)
3391 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg5ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
3392 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 1
3393 %2 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 5
3394 store i32 %2, ptr %outvl
3395 ret <vscale x 2 x float> %1
3398 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg6ff.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr , i32)
3399 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg6ff.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i1>, i32, i32)
3401 define <vscale x 2 x float> @test_vlseg6ff_nxv2f32(ptr %base, i32 %vl, ptr %outvl) {
3402 ; CHECK-LABEL: test_vlseg6ff_nxv2f32:
3403 ; CHECK: # %bb.0: # %entry
3404 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
3405 ; CHECK-NEXT: vlseg6e32ff.v v7, (a0)
3406 ; CHECK-NEXT: csrr a0, vl
3407 ; CHECK-NEXT: sw a0, 0(a2)
3410 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg6ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
3411 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 1
3412 %2 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 6
3413 store i32 %2, ptr %outvl
3414 ret <vscale x 2 x float> %1
3417 define <vscale x 2 x float> @test_vlseg6ff_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
3418 ; CHECK-LABEL: test_vlseg6ff_mask_nxv2f32:
3419 ; CHECK: # %bb.0: # %entry
3420 ; CHECK-NEXT: vmv1r.v v7, v8
3421 ; CHECK-NEXT: vmv1r.v v9, v8
3422 ; CHECK-NEXT: vmv1r.v v10, v8
3423 ; CHECK-NEXT: vmv1r.v v11, v8
3424 ; CHECK-NEXT: vmv1r.v v12, v8
3425 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
3426 ; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t
3427 ; CHECK-NEXT: csrr a0, vl
3428 ; CHECK-NEXT: sw a0, 0(a2)
3431 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg6ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
3432 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 1
3433 %2 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 6
3434 store i32 %2, ptr %outvl
3435 ret <vscale x 2 x float> %1
3438 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg7ff.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr , i32)
3439 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg7ff.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i1>, i32, i32)
3441 define <vscale x 2 x float> @test_vlseg7ff_nxv2f32(ptr %base, i32 %vl, ptr %outvl) {
3442 ; CHECK-LABEL: test_vlseg7ff_nxv2f32:
3443 ; CHECK: # %bb.0: # %entry
3444 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
3445 ; CHECK-NEXT: vlseg7e32ff.v v7, (a0)
3446 ; CHECK-NEXT: csrr a0, vl
3447 ; CHECK-NEXT: sw a0, 0(a2)
3450 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg7ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
3451 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 1
3452 %2 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 7
3453 store i32 %2, ptr %outvl
3454 ret <vscale x 2 x float> %1
3457 define <vscale x 2 x float> @test_vlseg7ff_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
3458 ; CHECK-LABEL: test_vlseg7ff_mask_nxv2f32:
3459 ; CHECK: # %bb.0: # %entry
3460 ; CHECK-NEXT: vmv1r.v v7, v8
3461 ; CHECK-NEXT: vmv1r.v v9, v8
3462 ; CHECK-NEXT: vmv1r.v v10, v8
3463 ; CHECK-NEXT: vmv1r.v v11, v8
3464 ; CHECK-NEXT: vmv1r.v v12, v8
3465 ; CHECK-NEXT: vmv1r.v v13, v8
3466 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
3467 ; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t
3468 ; CHECK-NEXT: csrr a0, vl
3469 ; CHECK-NEXT: sw a0, 0(a2)
3472 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg7ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
3473 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 1
3474 %2 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 7
3475 store i32 %2, ptr %outvl
3476 ret <vscale x 2 x float> %1
3479 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg8ff.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr , i32)
3480 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg8ff.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, <vscale x 2 x i1>, i32, i32)
3482 define <vscale x 2 x float> @test_vlseg8ff_nxv2f32(ptr %base, i32 %vl, ptr %outvl) {
3483 ; CHECK-LABEL: test_vlseg8ff_nxv2f32:
3484 ; CHECK: # %bb.0: # %entry
3485 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
3486 ; CHECK-NEXT: vlseg8e32ff.v v7, (a0)
3487 ; CHECK-NEXT: csrr a0, vl
3488 ; CHECK-NEXT: sw a0, 0(a2)
3491 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg8ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
3492 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 1
3493 %2 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 8
3494 store i32 %2, ptr %outvl
3495 ret <vscale x 2 x float> %1
3498 define <vscale x 2 x float> @test_vlseg8ff_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
3499 ; CHECK-LABEL: test_vlseg8ff_mask_nxv2f32:
3500 ; CHECK: # %bb.0: # %entry
3501 ; CHECK-NEXT: vmv1r.v v7, v8
3502 ; CHECK-NEXT: vmv1r.v v9, v8
3503 ; CHECK-NEXT: vmv1r.v v10, v8
3504 ; CHECK-NEXT: vmv1r.v v11, v8
3505 ; CHECK-NEXT: vmv1r.v v12, v8
3506 ; CHECK-NEXT: vmv1r.v v13, v8
3507 ; CHECK-NEXT: vmv1r.v v14, v8
3508 ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
3509 ; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t
3510 ; CHECK-NEXT: csrr a0, vl
3511 ; CHECK-NEXT: sw a0, 0(a2)
3514 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg8ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
3515 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 1
3516 %2 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} %0, 8
3517 store i32 %2, ptr %outvl
3518 ret <vscale x 2 x float> %1
3521 declare {<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg2ff.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>, ptr , i32)
3522 declare {<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i32, i32)
3524 define <vscale x 1 x half> @test_vlseg2ff_nxv1f16(ptr %base, i32 %vl, ptr %outvl) {
3525 ; CHECK-LABEL: test_vlseg2ff_nxv1f16:
3526 ; CHECK: # %bb.0: # %entry
3527 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3528 ; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
3529 ; CHECK-NEXT: csrr a0, vl
3530 ; CHECK-NEXT: sw a0, 0(a2)
3533 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg2ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
3534 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 1
3535 %2 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 2
3536 store i32 %2, ptr %outvl
3537 ret <vscale x 1 x half> %1
3540 define <vscale x 1 x half> @test_vlseg2ff_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
3541 ; CHECK-LABEL: test_vlseg2ff_mask_nxv1f16:
3542 ; CHECK: # %bb.0: # %entry
3543 ; CHECK-NEXT: vmv1r.v v7, v8
3544 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
3545 ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
3546 ; CHECK-NEXT: csrr a0, vl
3547 ; CHECK-NEXT: sw a0, 0(a2)
3550 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3551 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 1
3552 %2 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 2
3553 store i32 %2, ptr %outvl
3554 ret <vscale x 1 x half> %1
3557 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg3ff.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr , i32)
3558 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg3ff.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i32, i32)
3560 define <vscale x 1 x half> @test_vlseg3ff_nxv1f16(ptr %base, i32 %vl, ptr %outvl) {
3561 ; CHECK-LABEL: test_vlseg3ff_nxv1f16:
3562 ; CHECK: # %bb.0: # %entry
3563 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3564 ; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
3565 ; CHECK-NEXT: csrr a0, vl
3566 ; CHECK-NEXT: sw a0, 0(a2)
3569 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg3ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
3570 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 1
3571 %2 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 3
3572 store i32 %2, ptr %outvl
3573 ret <vscale x 1 x half> %1
3576 define <vscale x 1 x half> @test_vlseg3ff_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
3577 ; CHECK-LABEL: test_vlseg3ff_mask_nxv1f16:
3578 ; CHECK: # %bb.0: # %entry
3579 ; CHECK-NEXT: vmv1r.v v7, v8
3580 ; CHECK-NEXT: vmv1r.v v9, v8
3581 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
3582 ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
3583 ; CHECK-NEXT: csrr a0, vl
3584 ; CHECK-NEXT: sw a0, 0(a2)
3587 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg3ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3588 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 1
3589 %2 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 3
3590 store i32 %2, ptr %outvl
3591 ret <vscale x 1 x half> %1
3594 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg4ff.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr , i32)
3595 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg4ff.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i32, i32)
3597 define <vscale x 1 x half> @test_vlseg4ff_nxv1f16(ptr %base, i32 %vl, ptr %outvl) {
3598 ; CHECK-LABEL: test_vlseg4ff_nxv1f16:
3599 ; CHECK: # %bb.0: # %entry
3600 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3601 ; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
3602 ; CHECK-NEXT: csrr a0, vl
3603 ; CHECK-NEXT: sw a0, 0(a2)
3606 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg4ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
3607 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 1
3608 %2 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 4
3609 store i32 %2, ptr %outvl
3610 ret <vscale x 1 x half> %1
3613 define <vscale x 1 x half> @test_vlseg4ff_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
3614 ; CHECK-LABEL: test_vlseg4ff_mask_nxv1f16:
3615 ; CHECK: # %bb.0: # %entry
3616 ; CHECK-NEXT: vmv1r.v v7, v8
3617 ; CHECK-NEXT: vmv1r.v v9, v8
3618 ; CHECK-NEXT: vmv1r.v v10, v8
3619 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
3620 ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
3621 ; CHECK-NEXT: csrr a0, vl
3622 ; CHECK-NEXT: sw a0, 0(a2)
3625 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg4ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3626 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 1
3627 %2 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 4
3628 store i32 %2, ptr %outvl
3629 ret <vscale x 1 x half> %1
3632 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg5ff.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr , i32)
3633 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg5ff.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i32, i32)
3635 define <vscale x 1 x half> @test_vlseg5ff_nxv1f16(ptr %base, i32 %vl, ptr %outvl) {
3636 ; CHECK-LABEL: test_vlseg5ff_nxv1f16:
3637 ; CHECK: # %bb.0: # %entry
3638 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3639 ; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
3640 ; CHECK-NEXT: csrr a0, vl
3641 ; CHECK-NEXT: sw a0, 0(a2)
3644 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg5ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
3645 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 1
3646 %2 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 5
3647 store i32 %2, ptr %outvl
3648 ret <vscale x 1 x half> %1
3651 define <vscale x 1 x half> @test_vlseg5ff_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
3652 ; CHECK-LABEL: test_vlseg5ff_mask_nxv1f16:
3653 ; CHECK: # %bb.0: # %entry
3654 ; CHECK-NEXT: vmv1r.v v7, v8
3655 ; CHECK-NEXT: vmv1r.v v9, v8
3656 ; CHECK-NEXT: vmv1r.v v10, v8
3657 ; CHECK-NEXT: vmv1r.v v11, v8
3658 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
3659 ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
3660 ; CHECK-NEXT: csrr a0, vl
3661 ; CHECK-NEXT: sw a0, 0(a2)
3664 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg5ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3665 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 1
3666 %2 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 5
3667 store i32 %2, ptr %outvl
3668 ret <vscale x 1 x half> %1
3671 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg6ff.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr , i32)
3672 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg6ff.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i32, i32)
3674 define <vscale x 1 x half> @test_vlseg6ff_nxv1f16(ptr %base, i32 %vl, ptr %outvl) {
3675 ; CHECK-LABEL: test_vlseg6ff_nxv1f16:
3676 ; CHECK: # %bb.0: # %entry
3677 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3678 ; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
3679 ; CHECK-NEXT: csrr a0, vl
3680 ; CHECK-NEXT: sw a0, 0(a2)
3683 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg6ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
3684 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 1
3685 %2 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 6
3686 store i32 %2, ptr %outvl
3687 ret <vscale x 1 x half> %1
3690 define <vscale x 1 x half> @test_vlseg6ff_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
3691 ; CHECK-LABEL: test_vlseg6ff_mask_nxv1f16:
3692 ; CHECK: # %bb.0: # %entry
3693 ; CHECK-NEXT: vmv1r.v v7, v8
3694 ; CHECK-NEXT: vmv1r.v v9, v8
3695 ; CHECK-NEXT: vmv1r.v v10, v8
3696 ; CHECK-NEXT: vmv1r.v v11, v8
3697 ; CHECK-NEXT: vmv1r.v v12, v8
3698 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
3699 ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
3700 ; CHECK-NEXT: csrr a0, vl
3701 ; CHECK-NEXT: sw a0, 0(a2)
3704 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg6ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3705 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 1
3706 %2 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 6
3707 store i32 %2, ptr %outvl
3708 ret <vscale x 1 x half> %1
3711 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg7ff.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr , i32)
3712 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg7ff.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i32, i32)
3714 define <vscale x 1 x half> @test_vlseg7ff_nxv1f16(ptr %base, i32 %vl, ptr %outvl) {
3715 ; CHECK-LABEL: test_vlseg7ff_nxv1f16:
3716 ; CHECK: # %bb.0: # %entry
3717 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3718 ; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
3719 ; CHECK-NEXT: csrr a0, vl
3720 ; CHECK-NEXT: sw a0, 0(a2)
3723 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg7ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
3724 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 1
3725 %2 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 7
3726 store i32 %2, ptr %outvl
3727 ret <vscale x 1 x half> %1
3730 define <vscale x 1 x half> @test_vlseg7ff_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
3731 ; CHECK-LABEL: test_vlseg7ff_mask_nxv1f16:
3732 ; CHECK: # %bb.0: # %entry
3733 ; CHECK-NEXT: vmv1r.v v7, v8
3734 ; CHECK-NEXT: vmv1r.v v9, v8
3735 ; CHECK-NEXT: vmv1r.v v10, v8
3736 ; CHECK-NEXT: vmv1r.v v11, v8
3737 ; CHECK-NEXT: vmv1r.v v12, v8
3738 ; CHECK-NEXT: vmv1r.v v13, v8
3739 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
3740 ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
3741 ; CHECK-NEXT: csrr a0, vl
3742 ; CHECK-NEXT: sw a0, 0(a2)
3745 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg7ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3746 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 1
3747 %2 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 7
3748 store i32 %2, ptr %outvl
3749 ret <vscale x 1 x half> %1
3752 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg8ff.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr , i32)
3753 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg8ff.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, <vscale x 1 x i1>, i32, i32)
3755 define <vscale x 1 x half> @test_vlseg8ff_nxv1f16(ptr %base, i32 %vl, ptr %outvl) {
3756 ; CHECK-LABEL: test_vlseg8ff_nxv1f16:
3757 ; CHECK: # %bb.0: # %entry
3758 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
3759 ; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
3760 ; CHECK-NEXT: csrr a0, vl
3761 ; CHECK-NEXT: sw a0, 0(a2)
3764 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg8ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
3765 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 1
3766 %2 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 8
3767 store i32 %2, ptr %outvl
3768 ret <vscale x 1 x half> %1
3771 define <vscale x 1 x half> @test_vlseg8ff_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
3772 ; CHECK-LABEL: test_vlseg8ff_mask_nxv1f16:
3773 ; CHECK: # %bb.0: # %entry
3774 ; CHECK-NEXT: vmv1r.v v7, v8
3775 ; CHECK-NEXT: vmv1r.v v9, v8
3776 ; CHECK-NEXT: vmv1r.v v10, v8
3777 ; CHECK-NEXT: vmv1r.v v11, v8
3778 ; CHECK-NEXT: vmv1r.v v12, v8
3779 ; CHECK-NEXT: vmv1r.v v13, v8
3780 ; CHECK-NEXT: vmv1r.v v14, v8
3781 ; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
3782 ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
3783 ; CHECK-NEXT: csrr a0, vl
3784 ; CHECK-NEXT: sw a0, 0(a2)
3787 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg8ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3788 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 1
3789 %2 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} %0, 8
3790 store i32 %2, ptr %outvl
3791 ret <vscale x 1 x half> %1
3794 declare {<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg2ff.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>, ptr , i32)
3795 declare {<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg2ff.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i1>, i32, i32)
3797 define <vscale x 1 x float> @test_vlseg2ff_nxv1f32(ptr %base, i32 %vl, ptr %outvl) {
3798 ; CHECK-LABEL: test_vlseg2ff_nxv1f32:
3799 ; CHECK: # %bb.0: # %entry
3800 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3801 ; CHECK-NEXT: vlseg2e32ff.v v7, (a0)
3802 ; CHECK-NEXT: csrr a0, vl
3803 ; CHECK-NEXT: sw a0, 0(a2)
3806 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg2ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
3807 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 1
3808 %2 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 2
3809 store i32 %2, ptr %outvl
3810 ret <vscale x 1 x float> %1
3813 define <vscale x 1 x float> @test_vlseg2ff_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
3814 ; CHECK-LABEL: test_vlseg2ff_mask_nxv1f32:
3815 ; CHECK: # %bb.0: # %entry
3816 ; CHECK-NEXT: vmv1r.v v7, v8
3817 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
3818 ; CHECK-NEXT: vlseg2e32ff.v v7, (a0), v0.t
3819 ; CHECK-NEXT: csrr a0, vl
3820 ; CHECK-NEXT: sw a0, 0(a2)
3823 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg2ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3824 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 1
3825 %2 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 2
3826 store i32 %2, ptr %outvl
3827 ret <vscale x 1 x float> %1
3830 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg3ff.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr , i32)
3831 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg3ff.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i1>, i32, i32)
3833 define <vscale x 1 x float> @test_vlseg3ff_nxv1f32(ptr %base, i32 %vl, ptr %outvl) {
3834 ; CHECK-LABEL: test_vlseg3ff_nxv1f32:
3835 ; CHECK: # %bb.0: # %entry
3836 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3837 ; CHECK-NEXT: vlseg3e32ff.v v7, (a0)
3838 ; CHECK-NEXT: csrr a0, vl
3839 ; CHECK-NEXT: sw a0, 0(a2)
3842 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg3ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
3843 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 1
3844 %2 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 3
3845 store i32 %2, ptr %outvl
3846 ret <vscale x 1 x float> %1
3849 define <vscale x 1 x float> @test_vlseg3ff_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
3850 ; CHECK-LABEL: test_vlseg3ff_mask_nxv1f32:
3851 ; CHECK: # %bb.0: # %entry
3852 ; CHECK-NEXT: vmv1r.v v7, v8
3853 ; CHECK-NEXT: vmv1r.v v9, v8
3854 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
3855 ; CHECK-NEXT: vlseg3e32ff.v v7, (a0), v0.t
3856 ; CHECK-NEXT: csrr a0, vl
3857 ; CHECK-NEXT: sw a0, 0(a2)
3860 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg3ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3861 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 1
3862 %2 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 3
3863 store i32 %2, ptr %outvl
3864 ret <vscale x 1 x float> %1
3867 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg4ff.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr , i32)
3868 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg4ff.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i1>, i32, i32)
3870 define <vscale x 1 x float> @test_vlseg4ff_nxv1f32(ptr %base, i32 %vl, ptr %outvl) {
3871 ; CHECK-LABEL: test_vlseg4ff_nxv1f32:
3872 ; CHECK: # %bb.0: # %entry
3873 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3874 ; CHECK-NEXT: vlseg4e32ff.v v7, (a0)
3875 ; CHECK-NEXT: csrr a0, vl
3876 ; CHECK-NEXT: sw a0, 0(a2)
3879 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg4ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
3880 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 1
3881 %2 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 4
3882 store i32 %2, ptr %outvl
3883 ret <vscale x 1 x float> %1
3886 define <vscale x 1 x float> @test_vlseg4ff_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
3887 ; CHECK-LABEL: test_vlseg4ff_mask_nxv1f32:
3888 ; CHECK: # %bb.0: # %entry
3889 ; CHECK-NEXT: vmv1r.v v7, v8
3890 ; CHECK-NEXT: vmv1r.v v9, v8
3891 ; CHECK-NEXT: vmv1r.v v10, v8
3892 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
3893 ; CHECK-NEXT: vlseg4e32ff.v v7, (a0), v0.t
3894 ; CHECK-NEXT: csrr a0, vl
3895 ; CHECK-NEXT: sw a0, 0(a2)
3898 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg4ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3899 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 1
3900 %2 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 4
3901 store i32 %2, ptr %outvl
3902 ret <vscale x 1 x float> %1
3905 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg5ff.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr , i32)
3906 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg5ff.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i1>, i32, i32)
3908 define <vscale x 1 x float> @test_vlseg5ff_nxv1f32(ptr %base, i32 %vl, ptr %outvl) {
3909 ; CHECK-LABEL: test_vlseg5ff_nxv1f32:
3910 ; CHECK: # %bb.0: # %entry
3911 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3912 ; CHECK-NEXT: vlseg5e32ff.v v7, (a0)
3913 ; CHECK-NEXT: csrr a0, vl
3914 ; CHECK-NEXT: sw a0, 0(a2)
3917 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg5ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
3918 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 1
3919 %2 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 5
3920 store i32 %2, ptr %outvl
3921 ret <vscale x 1 x float> %1
3924 define <vscale x 1 x float> @test_vlseg5ff_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
3925 ; CHECK-LABEL: test_vlseg5ff_mask_nxv1f32:
3926 ; CHECK: # %bb.0: # %entry
3927 ; CHECK-NEXT: vmv1r.v v7, v8
3928 ; CHECK-NEXT: vmv1r.v v9, v8
3929 ; CHECK-NEXT: vmv1r.v v10, v8
3930 ; CHECK-NEXT: vmv1r.v v11, v8
3931 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
3932 ; CHECK-NEXT: vlseg5e32ff.v v7, (a0), v0.t
3933 ; CHECK-NEXT: csrr a0, vl
3934 ; CHECK-NEXT: sw a0, 0(a2)
3937 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg5ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3938 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 1
3939 %2 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 5
3940 store i32 %2, ptr %outvl
3941 ret <vscale x 1 x float> %1
3944 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg6ff.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr , i32)
3945 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg6ff.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i1>, i32, i32)
3947 define <vscale x 1 x float> @test_vlseg6ff_nxv1f32(ptr %base, i32 %vl, ptr %outvl) {
3948 ; CHECK-LABEL: test_vlseg6ff_nxv1f32:
3949 ; CHECK: # %bb.0: # %entry
3950 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3951 ; CHECK-NEXT: vlseg6e32ff.v v7, (a0)
3952 ; CHECK-NEXT: csrr a0, vl
3953 ; CHECK-NEXT: sw a0, 0(a2)
3956 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg6ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
3957 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 1
3958 %2 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 6
3959 store i32 %2, ptr %outvl
3960 ret <vscale x 1 x float> %1
3963 define <vscale x 1 x float> @test_vlseg6ff_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
3964 ; CHECK-LABEL: test_vlseg6ff_mask_nxv1f32:
3965 ; CHECK: # %bb.0: # %entry
3966 ; CHECK-NEXT: vmv1r.v v7, v8
3967 ; CHECK-NEXT: vmv1r.v v9, v8
3968 ; CHECK-NEXT: vmv1r.v v10, v8
3969 ; CHECK-NEXT: vmv1r.v v11, v8
3970 ; CHECK-NEXT: vmv1r.v v12, v8
3971 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
3972 ; CHECK-NEXT: vlseg6e32ff.v v7, (a0), v0.t
3973 ; CHECK-NEXT: csrr a0, vl
3974 ; CHECK-NEXT: sw a0, 0(a2)
3977 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg6ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3978 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 1
3979 %2 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 6
3980 store i32 %2, ptr %outvl
3981 ret <vscale x 1 x float> %1
3984 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg7ff.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr , i32)
3985 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg7ff.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i1>, i32, i32)
3987 define <vscale x 1 x float> @test_vlseg7ff_nxv1f32(ptr %base, i32 %vl, ptr %outvl) {
3988 ; CHECK-LABEL: test_vlseg7ff_nxv1f32:
3989 ; CHECK: # %bb.0: # %entry
3990 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
3991 ; CHECK-NEXT: vlseg7e32ff.v v7, (a0)
3992 ; CHECK-NEXT: csrr a0, vl
3993 ; CHECK-NEXT: sw a0, 0(a2)
3996 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg7ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
3997 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 1
3998 %2 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 7
3999 store i32 %2, ptr %outvl
4000 ret <vscale x 1 x float> %1
4003 define <vscale x 1 x float> @test_vlseg7ff_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
4004 ; CHECK-LABEL: test_vlseg7ff_mask_nxv1f32:
4005 ; CHECK: # %bb.0: # %entry
4006 ; CHECK-NEXT: vmv1r.v v7, v8
4007 ; CHECK-NEXT: vmv1r.v v9, v8
4008 ; CHECK-NEXT: vmv1r.v v10, v8
4009 ; CHECK-NEXT: vmv1r.v v11, v8
4010 ; CHECK-NEXT: vmv1r.v v12, v8
4011 ; CHECK-NEXT: vmv1r.v v13, v8
4012 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
4013 ; CHECK-NEXT: vlseg7e32ff.v v7, (a0), v0.t
4014 ; CHECK-NEXT: csrr a0, vl
4015 ; CHECK-NEXT: sw a0, 0(a2)
4018 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg7ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
4019 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 1
4020 %2 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 7
4021 store i32 %2, ptr %outvl
4022 ret <vscale x 1 x float> %1
4025 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg8ff.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr , i32)
4026 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg8ff.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, <vscale x 1 x i1>, i32, i32)
4028 define <vscale x 1 x float> @test_vlseg8ff_nxv1f32(ptr %base, i32 %vl, ptr %outvl) {
4029 ; CHECK-LABEL: test_vlseg8ff_nxv1f32:
4030 ; CHECK: # %bb.0: # %entry
4031 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
4032 ; CHECK-NEXT: vlseg8e32ff.v v7, (a0)
4033 ; CHECK-NEXT: csrr a0, vl
4034 ; CHECK-NEXT: sw a0, 0(a2)
4037 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg8ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
4038 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 1
4039 %2 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 8
4040 store i32 %2, ptr %outvl
4041 ret <vscale x 1 x float> %1
4044 define <vscale x 1 x float> @test_vlseg8ff_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl, <vscale x 1 x i1> %mask, ptr %outvl) {
4045 ; CHECK-LABEL: test_vlseg8ff_mask_nxv1f32:
4046 ; CHECK: # %bb.0: # %entry
4047 ; CHECK-NEXT: vmv1r.v v7, v8
4048 ; CHECK-NEXT: vmv1r.v v9, v8
4049 ; CHECK-NEXT: vmv1r.v v10, v8
4050 ; CHECK-NEXT: vmv1r.v v11, v8
4051 ; CHECK-NEXT: vmv1r.v v12, v8
4052 ; CHECK-NEXT: vmv1r.v v13, v8
4053 ; CHECK-NEXT: vmv1r.v v14, v8
4054 ; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
4055 ; CHECK-NEXT: vlseg8e32ff.v v7, (a0), v0.t
4056 ; CHECK-NEXT: csrr a0, vl
4057 ; CHECK-NEXT: sw a0, 0(a2)
4060 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg8ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
4061 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 1
4062 %2 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} %0, 8
4063 store i32 %2, ptr %outvl
4064 ret <vscale x 1 x float> %1
4067 declare {<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg2ff.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, ptr , i32)
4068 declare {<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i1>, i32, i32)
4070 define <vscale x 8 x half> @test_vlseg2ff_nxv8f16(ptr %base, i32 %vl, ptr %outvl) {
4071 ; CHECK-LABEL: test_vlseg2ff_nxv8f16:
4072 ; CHECK: # %bb.0: # %entry
4073 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
4074 ; CHECK-NEXT: vlseg2e16ff.v v6, (a0)
4075 ; CHECK-NEXT: csrr a0, vl
4076 ; CHECK-NEXT: sw a0, 0(a2)
4079 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg2ff.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %vl)
4080 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>, i32} %0, 1
4081 %2 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>, i32} %0, 2
4082 store i32 %2, ptr %outvl
4083 ret <vscale x 8 x half> %1
4086 define <vscale x 8 x half> @test_vlseg2ff_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
4087 ; CHECK-LABEL: test_vlseg2ff_mask_nxv8f16:
4088 ; CHECK: # %bb.0: # %entry
4089 ; CHECK-NEXT: vmv2r.v v6, v8
4090 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
4091 ; CHECK-NEXT: vlseg2e16ff.v v6, (a0), v0.t
4092 ; CHECK-NEXT: csrr a0, vl
4093 ; CHECK-NEXT: sw a0, 0(a2)
4096 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
4097 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>, i32} %0, 1
4098 %2 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>, i32} %0, 2
4099 store i32 %2, ptr %outvl
4100 ret <vscale x 8 x half> %1
4103 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg3ff.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr , i32)
4104 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg3ff.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i1>, i32, i32)
4106 define <vscale x 8 x half> @test_vlseg3ff_nxv8f16(ptr %base, i32 %vl, ptr %outvl) {
4107 ; CHECK-LABEL: test_vlseg3ff_nxv8f16:
4108 ; CHECK: # %bb.0: # %entry
4109 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
4110 ; CHECK-NEXT: vlseg3e16ff.v v6, (a0)
4111 ; CHECK-NEXT: csrr a0, vl
4112 ; CHECK-NEXT: sw a0, 0(a2)
4115 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg3ff.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %vl)
4116 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} %0, 1
4117 %2 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} %0, 3
4118 store i32 %2, ptr %outvl
4119 ret <vscale x 8 x half> %1
4122 define <vscale x 8 x half> @test_vlseg3ff_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
4123 ; CHECK-LABEL: test_vlseg3ff_mask_nxv8f16:
4124 ; CHECK: # %bb.0: # %entry
4125 ; CHECK-NEXT: vmv2r.v v6, v8
4126 ; CHECK-NEXT: vmv2r.v v10, v8
4127 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
4128 ; CHECK-NEXT: vlseg3e16ff.v v6, (a0), v0.t
4129 ; CHECK-NEXT: csrr a0, vl
4130 ; CHECK-NEXT: sw a0, 0(a2)
4133 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg3ff.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
4134 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} %0, 1
4135 %2 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} %0, 3
4136 store i32 %2, ptr %outvl
4137 ret <vscale x 8 x half> %1
4140 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg4ff.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr , i32)
4141 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg4ff.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, <vscale x 8 x i1>, i32, i32)
4143 define <vscale x 8 x half> @test_vlseg4ff_nxv8f16(ptr %base, i32 %vl, ptr %outvl) {
4144 ; CHECK-LABEL: test_vlseg4ff_nxv8f16:
4145 ; CHECK: # %bb.0: # %entry
4146 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
4147 ; CHECK-NEXT: vlseg4e16ff.v v6, (a0)
4148 ; CHECK-NEXT: csrr a0, vl
4149 ; CHECK-NEXT: sw a0, 0(a2)
4152 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg4ff.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %vl)
4153 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} %0, 1
4154 %2 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} %0, 4
4155 store i32 %2, ptr %outvl
4156 ret <vscale x 8 x half> %1
4159 define <vscale x 8 x half> @test_vlseg4ff_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
4160 ; CHECK-LABEL: test_vlseg4ff_mask_nxv8f16:
4161 ; CHECK: # %bb.0: # %entry
4162 ; CHECK-NEXT: vmv2r.v v6, v8
4163 ; CHECK-NEXT: vmv2r.v v10, v8
4164 ; CHECK-NEXT: vmv2r.v v12, v8
4165 ; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
4166 ; CHECK-NEXT: vlseg4e16ff.v v6, (a0), v0.t
4167 ; CHECK-NEXT: csrr a0, vl
4168 ; CHECK-NEXT: sw a0, 0(a2)
4171 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg4ff.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
4172 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} %0, 1
4173 %2 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} %0, 4
4174 store i32 %2, ptr %outvl
4175 ret <vscale x 8 x half> %1
4178 declare {<vscale x 8 x float>,<vscale x 8 x float>, i32} @llvm.riscv.vlseg2ff.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, ptr , i32)
4179 declare {<vscale x 8 x float>,<vscale x 8 x float>, i32} @llvm.riscv.vlseg2ff.mask.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, ptr, <vscale x 8 x i1>, i32, i32)
4181 define <vscale x 8 x float> @test_vlseg2ff_nxv8f32(ptr %base, i32 %vl, ptr %outvl) {
4182 ; CHECK-LABEL: test_vlseg2ff_nxv8f32:
4183 ; CHECK: # %bb.0: # %entry
4184 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
4185 ; CHECK-NEXT: vlseg2e32ff.v v4, (a0)
4186 ; CHECK-NEXT: csrr a0, vl
4187 ; CHECK-NEXT: sw a0, 0(a2)
4190 %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>, i32} @llvm.riscv.vlseg2ff.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i32 %vl)
4191 %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>, i32} %0, 1
4192 %2 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>, i32} %0, 2
4193 store i32 %2, ptr %outvl
4194 ret <vscale x 8 x float> %1
4197 define <vscale x 8 x float> @test_vlseg2ff_mask_nxv8f32(<vscale x 8 x float> %val, ptr %base, i32 %vl, <vscale x 8 x i1> %mask, ptr %outvl) {
4198 ; CHECK-LABEL: test_vlseg2ff_mask_nxv8f32:
4199 ; CHECK: # %bb.0: # %entry
4200 ; CHECK-NEXT: vmv4r.v v4, v8
4201 ; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
4202 ; CHECK-NEXT: vlseg2e32ff.v v4, (a0), v0.t
4203 ; CHECK-NEXT: csrr a0, vl
4204 ; CHECK-NEXT: sw a0, 0(a2)
4207 %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>, i32} @llvm.riscv.vlseg2ff.mask.nxv8f32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
4208 %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>, i32} %0, 1
4209 %2 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>, i32} %0, 2
4210 store i32 %2, ptr %outvl
4211 ret <vscale x 8 x float> %1
4214 declare {<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg2ff.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, ptr , i32)
4215 declare {<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg2ff.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i1>, i32, i32)
4217 define <vscale x 2 x double> @test_vlseg2ff_nxv2f64(ptr %base, i32 %vl, ptr %outvl) {
4218 ; CHECK-LABEL: test_vlseg2ff_nxv2f64:
4219 ; CHECK: # %bb.0: # %entry
4220 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
4221 ; CHECK-NEXT: vlseg2e64ff.v v6, (a0)
4222 ; CHECK-NEXT: csrr a0, vl
4223 ; CHECK-NEXT: sw a0, 0(a2)
4226 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg2ff.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %vl)
4227 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>, i32} %0, 1
4228 %2 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>, i32} %0, 2
4229 store i32 %2, ptr %outvl
4230 ret <vscale x 2 x double> %1
4233 define <vscale x 2 x double> @test_vlseg2ff_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
4234 ; CHECK-LABEL: test_vlseg2ff_mask_nxv2f64:
4235 ; CHECK: # %bb.0: # %entry
4236 ; CHECK-NEXT: vmv2r.v v6, v8
4237 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
4238 ; CHECK-NEXT: vlseg2e64ff.v v6, (a0), v0.t
4239 ; CHECK-NEXT: csrr a0, vl
4240 ; CHECK-NEXT: sw a0, 0(a2)
4243 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg2ff.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
4244 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>, i32} %0, 1
4245 %2 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>, i32} %0, 2
4246 store i32 %2, ptr %outvl
4247 ret <vscale x 2 x double> %1
4250 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg3ff.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr , i32)
4251 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg3ff.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i1>, i32, i32)
4253 define <vscale x 2 x double> @test_vlseg3ff_nxv2f64(ptr %base, i32 %vl, ptr %outvl) {
4254 ; CHECK-LABEL: test_vlseg3ff_nxv2f64:
4255 ; CHECK: # %bb.0: # %entry
4256 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
4257 ; CHECK-NEXT: vlseg3e64ff.v v6, (a0)
4258 ; CHECK-NEXT: csrr a0, vl
4259 ; CHECK-NEXT: sw a0, 0(a2)
4262 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg3ff.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %vl)
4263 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} %0, 1
4264 %2 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} %0, 3
4265 store i32 %2, ptr %outvl
4266 ret <vscale x 2 x double> %1
4269 define <vscale x 2 x double> @test_vlseg3ff_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
4270 ; CHECK-LABEL: test_vlseg3ff_mask_nxv2f64:
4271 ; CHECK: # %bb.0: # %entry
4272 ; CHECK-NEXT: vmv2r.v v6, v8
4273 ; CHECK-NEXT: vmv2r.v v10, v8
4274 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
4275 ; CHECK-NEXT: vlseg3e64ff.v v6, (a0), v0.t
4276 ; CHECK-NEXT: csrr a0, vl
4277 ; CHECK-NEXT: sw a0, 0(a2)
4280 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg3ff.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
4281 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} %0, 1
4282 %2 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} %0, 3
4283 store i32 %2, ptr %outvl
4284 ret <vscale x 2 x double> %1
4287 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg4ff.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr , i32)
4288 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg4ff.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, <vscale x 2 x i1>, i32, i32)
4290 define <vscale x 2 x double> @test_vlseg4ff_nxv2f64(ptr %base, i32 %vl, ptr %outvl) {
4291 ; CHECK-LABEL: test_vlseg4ff_nxv2f64:
4292 ; CHECK: # %bb.0: # %entry
4293 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
4294 ; CHECK-NEXT: vlseg4e64ff.v v6, (a0)
4295 ; CHECK-NEXT: csrr a0, vl
4296 ; CHECK-NEXT: sw a0, 0(a2)
4299 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg4ff.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %vl)
4300 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} %0, 1
4301 %2 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} %0, 4
4302 store i32 %2, ptr %outvl
4303 ret <vscale x 2 x double> %1
4306 define <vscale x 2 x double> @test_vlseg4ff_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
4307 ; CHECK-LABEL: test_vlseg4ff_mask_nxv2f64:
4308 ; CHECK: # %bb.0: # %entry
4309 ; CHECK-NEXT: vmv2r.v v6, v8
4310 ; CHECK-NEXT: vmv2r.v v10, v8
4311 ; CHECK-NEXT: vmv2r.v v12, v8
4312 ; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
4313 ; CHECK-NEXT: vlseg4e64ff.v v6, (a0), v0.t
4314 ; CHECK-NEXT: csrr a0, vl
4315 ; CHECK-NEXT: sw a0, 0(a2)
4318 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg4ff.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
4319 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} %0, 1
4320 %2 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} %0, 4
4321 store i32 %2, ptr %outvl
4322 ret <vscale x 2 x double> %1
4325 declare {<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg2ff.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, ptr , i32)
4326 declare {<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i1>, i32, i32)
4328 define <vscale x 4 x half> @test_vlseg2ff_nxv4f16(ptr %base, i32 %vl, ptr %outvl) {
4329 ; CHECK-LABEL: test_vlseg2ff_nxv4f16:
4330 ; CHECK: # %bb.0: # %entry
4331 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
4332 ; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
4333 ; CHECK-NEXT: csrr a0, vl
4334 ; CHECK-NEXT: sw a0, 0(a2)
4337 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg2ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
4338 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 1
4339 %2 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 2
4340 store i32 %2, ptr %outvl
4341 ret <vscale x 4 x half> %1
4344 define <vscale x 4 x half> @test_vlseg2ff_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
4345 ; CHECK-LABEL: test_vlseg2ff_mask_nxv4f16:
4346 ; CHECK: # %bb.0: # %entry
4347 ; CHECK-NEXT: vmv1r.v v7, v8
4348 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
4349 ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
4350 ; CHECK-NEXT: csrr a0, vl
4351 ; CHECK-NEXT: sw a0, 0(a2)
4354 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4355 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 1
4356 %2 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 2
4357 store i32 %2, ptr %outvl
4358 ret <vscale x 4 x half> %1
4361 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg3ff.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr , i32)
4362 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg3ff.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i1>, i32, i32)
4364 define <vscale x 4 x half> @test_vlseg3ff_nxv4f16(ptr %base, i32 %vl, ptr %outvl) {
4365 ; CHECK-LABEL: test_vlseg3ff_nxv4f16:
4366 ; CHECK: # %bb.0: # %entry
4367 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
4368 ; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
4369 ; CHECK-NEXT: csrr a0, vl
4370 ; CHECK-NEXT: sw a0, 0(a2)
4373 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg3ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
4374 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 1
4375 %2 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 3
4376 store i32 %2, ptr %outvl
4377 ret <vscale x 4 x half> %1
4380 define <vscale x 4 x half> @test_vlseg3ff_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
4381 ; CHECK-LABEL: test_vlseg3ff_mask_nxv4f16:
4382 ; CHECK: # %bb.0: # %entry
4383 ; CHECK-NEXT: vmv1r.v v7, v8
4384 ; CHECK-NEXT: vmv1r.v v9, v8
4385 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
4386 ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
4387 ; CHECK-NEXT: csrr a0, vl
4388 ; CHECK-NEXT: sw a0, 0(a2)
4391 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg3ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4392 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 1
4393 %2 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 3
4394 store i32 %2, ptr %outvl
4395 ret <vscale x 4 x half> %1
4398 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg4ff.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr , i32)
4399 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg4ff.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i1>, i32, i32)
4401 define <vscale x 4 x half> @test_vlseg4ff_nxv4f16(ptr %base, i32 %vl, ptr %outvl) {
4402 ; CHECK-LABEL: test_vlseg4ff_nxv4f16:
4403 ; CHECK: # %bb.0: # %entry
4404 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
4405 ; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
4406 ; CHECK-NEXT: csrr a0, vl
4407 ; CHECK-NEXT: sw a0, 0(a2)
4410 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg4ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
4411 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 1
4412 %2 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 4
4413 store i32 %2, ptr %outvl
4414 ret <vscale x 4 x half> %1
4417 define <vscale x 4 x half> @test_vlseg4ff_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
4418 ; CHECK-LABEL: test_vlseg4ff_mask_nxv4f16:
4419 ; CHECK: # %bb.0: # %entry
4420 ; CHECK-NEXT: vmv1r.v v7, v8
4421 ; CHECK-NEXT: vmv1r.v v9, v8
4422 ; CHECK-NEXT: vmv1r.v v10, v8
4423 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
4424 ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
4425 ; CHECK-NEXT: csrr a0, vl
4426 ; CHECK-NEXT: sw a0, 0(a2)
4429 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg4ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4430 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 1
4431 %2 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 4
4432 store i32 %2, ptr %outvl
4433 ret <vscale x 4 x half> %1
4436 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg5ff.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr , i32)
4437 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg5ff.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i1>, i32, i32)
4439 define <vscale x 4 x half> @test_vlseg5ff_nxv4f16(ptr %base, i32 %vl, ptr %outvl) {
4440 ; CHECK-LABEL: test_vlseg5ff_nxv4f16:
4441 ; CHECK: # %bb.0: # %entry
4442 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
4443 ; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
4444 ; CHECK-NEXT: csrr a0, vl
4445 ; CHECK-NEXT: sw a0, 0(a2)
4448 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg5ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
4449 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 1
4450 %2 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 5
4451 store i32 %2, ptr %outvl
4452 ret <vscale x 4 x half> %1
4455 define <vscale x 4 x half> @test_vlseg5ff_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
4456 ; CHECK-LABEL: test_vlseg5ff_mask_nxv4f16:
4457 ; CHECK: # %bb.0: # %entry
4458 ; CHECK-NEXT: vmv1r.v v7, v8
4459 ; CHECK-NEXT: vmv1r.v v9, v8
4460 ; CHECK-NEXT: vmv1r.v v10, v8
4461 ; CHECK-NEXT: vmv1r.v v11, v8
4462 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
4463 ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
4464 ; CHECK-NEXT: csrr a0, vl
4465 ; CHECK-NEXT: sw a0, 0(a2)
4468 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg5ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4469 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 1
4470 %2 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 5
4471 store i32 %2, ptr %outvl
4472 ret <vscale x 4 x half> %1
4475 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg6ff.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr , i32)
4476 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg6ff.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i1>, i32, i32)
4478 define <vscale x 4 x half> @test_vlseg6ff_nxv4f16(ptr %base, i32 %vl, ptr %outvl) {
4479 ; CHECK-LABEL: test_vlseg6ff_nxv4f16:
4480 ; CHECK: # %bb.0: # %entry
4481 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
4482 ; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
4483 ; CHECK-NEXT: csrr a0, vl
4484 ; CHECK-NEXT: sw a0, 0(a2)
4487 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg6ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
4488 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 1
4489 %2 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 6
4490 store i32 %2, ptr %outvl
4491 ret <vscale x 4 x half> %1
4494 define <vscale x 4 x half> @test_vlseg6ff_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
4495 ; CHECK-LABEL: test_vlseg6ff_mask_nxv4f16:
4496 ; CHECK: # %bb.0: # %entry
4497 ; CHECK-NEXT: vmv1r.v v7, v8
4498 ; CHECK-NEXT: vmv1r.v v9, v8
4499 ; CHECK-NEXT: vmv1r.v v10, v8
4500 ; CHECK-NEXT: vmv1r.v v11, v8
4501 ; CHECK-NEXT: vmv1r.v v12, v8
4502 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
4503 ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
4504 ; CHECK-NEXT: csrr a0, vl
4505 ; CHECK-NEXT: sw a0, 0(a2)
4508 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg6ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4509 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 1
4510 %2 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 6
4511 store i32 %2, ptr %outvl
4512 ret <vscale x 4 x half> %1
4515 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg7ff.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr , i32)
4516 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg7ff.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i1>, i32, i32)
4518 define <vscale x 4 x half> @test_vlseg7ff_nxv4f16(ptr %base, i32 %vl, ptr %outvl) {
4519 ; CHECK-LABEL: test_vlseg7ff_nxv4f16:
4520 ; CHECK: # %bb.0: # %entry
4521 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
4522 ; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
4523 ; CHECK-NEXT: csrr a0, vl
4524 ; CHECK-NEXT: sw a0, 0(a2)
4527 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg7ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
4528 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 1
4529 %2 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 7
4530 store i32 %2, ptr %outvl
4531 ret <vscale x 4 x half> %1
4534 define <vscale x 4 x half> @test_vlseg7ff_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
4535 ; CHECK-LABEL: test_vlseg7ff_mask_nxv4f16:
4536 ; CHECK: # %bb.0: # %entry
4537 ; CHECK-NEXT: vmv1r.v v7, v8
4538 ; CHECK-NEXT: vmv1r.v v9, v8
4539 ; CHECK-NEXT: vmv1r.v v10, v8
4540 ; CHECK-NEXT: vmv1r.v v11, v8
4541 ; CHECK-NEXT: vmv1r.v v12, v8
4542 ; CHECK-NEXT: vmv1r.v v13, v8
4543 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
4544 ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
4545 ; CHECK-NEXT: csrr a0, vl
4546 ; CHECK-NEXT: sw a0, 0(a2)
4549 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg7ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4550 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 1
4551 %2 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 7
4552 store i32 %2, ptr %outvl
4553 ret <vscale x 4 x half> %1
4556 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg8ff.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr , i32)
4557 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg8ff.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, <vscale x 4 x i1>, i32, i32)
4559 define <vscale x 4 x half> @test_vlseg8ff_nxv4f16(ptr %base, i32 %vl, ptr %outvl) {
4560 ; CHECK-LABEL: test_vlseg8ff_nxv4f16:
4561 ; CHECK: # %bb.0: # %entry
4562 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
4563 ; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
4564 ; CHECK-NEXT: csrr a0, vl
4565 ; CHECK-NEXT: sw a0, 0(a2)
4568 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg8ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
4569 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 1
4570 %2 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 8
4571 store i32 %2, ptr %outvl
4572 ret <vscale x 4 x half> %1
4575 define <vscale x 4 x half> @test_vlseg8ff_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
4576 ; CHECK-LABEL: test_vlseg8ff_mask_nxv4f16:
4577 ; CHECK: # %bb.0: # %entry
4578 ; CHECK-NEXT: vmv1r.v v7, v8
4579 ; CHECK-NEXT: vmv1r.v v9, v8
4580 ; CHECK-NEXT: vmv1r.v v10, v8
4581 ; CHECK-NEXT: vmv1r.v v11, v8
4582 ; CHECK-NEXT: vmv1r.v v12, v8
4583 ; CHECK-NEXT: vmv1r.v v13, v8
4584 ; CHECK-NEXT: vmv1r.v v14, v8
4585 ; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
4586 ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
4587 ; CHECK-NEXT: csrr a0, vl
4588 ; CHECK-NEXT: sw a0, 0(a2)
4591 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg8ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4592 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 1
4593 %2 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} %0, 8
4594 store i32 %2, ptr %outvl
4595 ret <vscale x 4 x half> %1
4598 declare {<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg2ff.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, ptr , i32)
4599 declare {<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i1>, i32, i32)
4601 define <vscale x 2 x half> @test_vlseg2ff_nxv2f16(ptr %base, i32 %vl, ptr %outvl) {
4602 ; CHECK-LABEL: test_vlseg2ff_nxv2f16:
4603 ; CHECK: # %bb.0: # %entry
4604 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
4605 ; CHECK-NEXT: vlseg2e16ff.v v7, (a0)
4606 ; CHECK-NEXT: csrr a0, vl
4607 ; CHECK-NEXT: sw a0, 0(a2)
4610 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg2ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
4611 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 1
4612 %2 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 2
4613 store i32 %2, ptr %outvl
4614 ret <vscale x 2 x half> %1
4617 define <vscale x 2 x half> @test_vlseg2ff_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
4618 ; CHECK-LABEL: test_vlseg2ff_mask_nxv2f16:
4619 ; CHECK: # %bb.0: # %entry
4620 ; CHECK-NEXT: vmv1r.v v7, v8
4621 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
4622 ; CHECK-NEXT: vlseg2e16ff.v v7, (a0), v0.t
4623 ; CHECK-NEXT: csrr a0, vl
4624 ; CHECK-NEXT: sw a0, 0(a2)
4627 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
4628 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 1
4629 %2 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 2
4630 store i32 %2, ptr %outvl
4631 ret <vscale x 2 x half> %1
4634 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg3ff.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr , i32)
4635 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg3ff.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i1>, i32, i32)
4637 define <vscale x 2 x half> @test_vlseg3ff_nxv2f16(ptr %base, i32 %vl, ptr %outvl) {
4638 ; CHECK-LABEL: test_vlseg3ff_nxv2f16:
4639 ; CHECK: # %bb.0: # %entry
4640 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
4641 ; CHECK-NEXT: vlseg3e16ff.v v7, (a0)
4642 ; CHECK-NEXT: csrr a0, vl
4643 ; CHECK-NEXT: sw a0, 0(a2)
4646 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg3ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
4647 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 1
4648 %2 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 3
4649 store i32 %2, ptr %outvl
4650 ret <vscale x 2 x half> %1
4653 define <vscale x 2 x half> @test_vlseg3ff_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
4654 ; CHECK-LABEL: test_vlseg3ff_mask_nxv2f16:
4655 ; CHECK: # %bb.0: # %entry
4656 ; CHECK-NEXT: vmv1r.v v7, v8
4657 ; CHECK-NEXT: vmv1r.v v9, v8
4658 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
4659 ; CHECK-NEXT: vlseg3e16ff.v v7, (a0), v0.t
4660 ; CHECK-NEXT: csrr a0, vl
4661 ; CHECK-NEXT: sw a0, 0(a2)
4664 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg3ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
4665 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 1
4666 %2 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 3
4667 store i32 %2, ptr %outvl
4668 ret <vscale x 2 x half> %1
4671 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg4ff.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr , i32)
4672 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg4ff.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i1>, i32, i32)
4674 define <vscale x 2 x half> @test_vlseg4ff_nxv2f16(ptr %base, i32 %vl, ptr %outvl) {
4675 ; CHECK-LABEL: test_vlseg4ff_nxv2f16:
4676 ; CHECK: # %bb.0: # %entry
4677 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
4678 ; CHECK-NEXT: vlseg4e16ff.v v7, (a0)
4679 ; CHECK-NEXT: csrr a0, vl
4680 ; CHECK-NEXT: sw a0, 0(a2)
4683 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg4ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
4684 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 1
4685 %2 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 4
4686 store i32 %2, ptr %outvl
4687 ret <vscale x 2 x half> %1
4690 define <vscale x 2 x half> @test_vlseg4ff_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
4691 ; CHECK-LABEL: test_vlseg4ff_mask_nxv2f16:
4692 ; CHECK: # %bb.0: # %entry
4693 ; CHECK-NEXT: vmv1r.v v7, v8
4694 ; CHECK-NEXT: vmv1r.v v9, v8
4695 ; CHECK-NEXT: vmv1r.v v10, v8
4696 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
4697 ; CHECK-NEXT: vlseg4e16ff.v v7, (a0), v0.t
4698 ; CHECK-NEXT: csrr a0, vl
4699 ; CHECK-NEXT: sw a0, 0(a2)
4702 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg4ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
4703 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 1
4704 %2 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 4
4705 store i32 %2, ptr %outvl
4706 ret <vscale x 2 x half> %1
4709 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg5ff.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr , i32)
4710 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg5ff.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i1>, i32, i32)
4712 define <vscale x 2 x half> @test_vlseg5ff_nxv2f16(ptr %base, i32 %vl, ptr %outvl) {
4713 ; CHECK-LABEL: test_vlseg5ff_nxv2f16:
4714 ; CHECK: # %bb.0: # %entry
4715 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
4716 ; CHECK-NEXT: vlseg5e16ff.v v7, (a0)
4717 ; CHECK-NEXT: csrr a0, vl
4718 ; CHECK-NEXT: sw a0, 0(a2)
4721 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg5ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
4722 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 1
4723 %2 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 5
4724 store i32 %2, ptr %outvl
4725 ret <vscale x 2 x half> %1
4728 define <vscale x 2 x half> @test_vlseg5ff_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
4729 ; CHECK-LABEL: test_vlseg5ff_mask_nxv2f16:
4730 ; CHECK: # %bb.0: # %entry
4731 ; CHECK-NEXT: vmv1r.v v7, v8
4732 ; CHECK-NEXT: vmv1r.v v9, v8
4733 ; CHECK-NEXT: vmv1r.v v10, v8
4734 ; CHECK-NEXT: vmv1r.v v11, v8
4735 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
4736 ; CHECK-NEXT: vlseg5e16ff.v v7, (a0), v0.t
4737 ; CHECK-NEXT: csrr a0, vl
4738 ; CHECK-NEXT: sw a0, 0(a2)
4741 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg5ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
4742 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 1
4743 %2 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 5
4744 store i32 %2, ptr %outvl
4745 ret <vscale x 2 x half> %1
4748 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg6ff.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr , i32)
4749 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg6ff.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i1>, i32, i32)
4751 define <vscale x 2 x half> @test_vlseg6ff_nxv2f16(ptr %base, i32 %vl, ptr %outvl) {
4752 ; CHECK-LABEL: test_vlseg6ff_nxv2f16:
4753 ; CHECK: # %bb.0: # %entry
4754 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
4755 ; CHECK-NEXT: vlseg6e16ff.v v7, (a0)
4756 ; CHECK-NEXT: csrr a0, vl
4757 ; CHECK-NEXT: sw a0, 0(a2)
4760 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg6ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
4761 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 1
4762 %2 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 6
4763 store i32 %2, ptr %outvl
4764 ret <vscale x 2 x half> %1
4767 define <vscale x 2 x half> @test_vlseg6ff_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
4768 ; CHECK-LABEL: test_vlseg6ff_mask_nxv2f16:
4769 ; CHECK: # %bb.0: # %entry
4770 ; CHECK-NEXT: vmv1r.v v7, v8
4771 ; CHECK-NEXT: vmv1r.v v9, v8
4772 ; CHECK-NEXT: vmv1r.v v10, v8
4773 ; CHECK-NEXT: vmv1r.v v11, v8
4774 ; CHECK-NEXT: vmv1r.v v12, v8
4775 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
4776 ; CHECK-NEXT: vlseg6e16ff.v v7, (a0), v0.t
4777 ; CHECK-NEXT: csrr a0, vl
4778 ; CHECK-NEXT: sw a0, 0(a2)
4781 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg6ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
4782 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 1
4783 %2 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 6
4784 store i32 %2, ptr %outvl
4785 ret <vscale x 2 x half> %1
4788 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg7ff.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr , i32)
4789 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg7ff.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i1>, i32, i32)
4791 define <vscale x 2 x half> @test_vlseg7ff_nxv2f16(ptr %base, i32 %vl, ptr %outvl) {
4792 ; CHECK-LABEL: test_vlseg7ff_nxv2f16:
4793 ; CHECK: # %bb.0: # %entry
4794 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
4795 ; CHECK-NEXT: vlseg7e16ff.v v7, (a0)
4796 ; CHECK-NEXT: csrr a0, vl
4797 ; CHECK-NEXT: sw a0, 0(a2)
4800 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg7ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
4801 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 1
4802 %2 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 7
4803 store i32 %2, ptr %outvl
4804 ret <vscale x 2 x half> %1
4807 define <vscale x 2 x half> @test_vlseg7ff_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
4808 ; CHECK-LABEL: test_vlseg7ff_mask_nxv2f16:
4809 ; CHECK: # %bb.0: # %entry
4810 ; CHECK-NEXT: vmv1r.v v7, v8
4811 ; CHECK-NEXT: vmv1r.v v9, v8
4812 ; CHECK-NEXT: vmv1r.v v10, v8
4813 ; CHECK-NEXT: vmv1r.v v11, v8
4814 ; CHECK-NEXT: vmv1r.v v12, v8
4815 ; CHECK-NEXT: vmv1r.v v13, v8
4816 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
4817 ; CHECK-NEXT: vlseg7e16ff.v v7, (a0), v0.t
4818 ; CHECK-NEXT: csrr a0, vl
4819 ; CHECK-NEXT: sw a0, 0(a2)
4822 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg7ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
4823 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 1
4824 %2 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 7
4825 store i32 %2, ptr %outvl
4826 ret <vscale x 2 x half> %1
4829 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg8ff.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr , i32)
4830 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg8ff.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, <vscale x 2 x i1>, i32, i32)
4832 define <vscale x 2 x half> @test_vlseg8ff_nxv2f16(ptr %base, i32 %vl, ptr %outvl) {
4833 ; CHECK-LABEL: test_vlseg8ff_nxv2f16:
4834 ; CHECK: # %bb.0: # %entry
4835 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
4836 ; CHECK-NEXT: vlseg8e16ff.v v7, (a0)
4837 ; CHECK-NEXT: csrr a0, vl
4838 ; CHECK-NEXT: sw a0, 0(a2)
4841 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg8ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
4842 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 1
4843 %2 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 8
4844 store i32 %2, ptr %outvl
4845 ret <vscale x 2 x half> %1
4848 define <vscale x 2 x half> @test_vlseg8ff_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl, <vscale x 2 x i1> %mask, ptr %outvl) {
4849 ; CHECK-LABEL: test_vlseg8ff_mask_nxv2f16:
4850 ; CHECK: # %bb.0: # %entry
4851 ; CHECK-NEXT: vmv1r.v v7, v8
4852 ; CHECK-NEXT: vmv1r.v v9, v8
4853 ; CHECK-NEXT: vmv1r.v v10, v8
4854 ; CHECK-NEXT: vmv1r.v v11, v8
4855 ; CHECK-NEXT: vmv1r.v v12, v8
4856 ; CHECK-NEXT: vmv1r.v v13, v8
4857 ; CHECK-NEXT: vmv1r.v v14, v8
4858 ; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
4859 ; CHECK-NEXT: vlseg8e16ff.v v7, (a0), v0.t
4860 ; CHECK-NEXT: csrr a0, vl
4861 ; CHECK-NEXT: sw a0, 0(a2)
4864 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg8ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
4865 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 1
4866 %2 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} %0, 8
4867 store i32 %2, ptr %outvl
4868 ret <vscale x 2 x half> %1
4871 declare {<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg2ff.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, ptr , i32)
4872 declare {<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg2ff.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i1>, i32, i32)
4874 define <vscale x 4 x float> @test_vlseg2ff_nxv4f32(ptr %base, i32 %vl, ptr %outvl) {
4875 ; CHECK-LABEL: test_vlseg2ff_nxv4f32:
4876 ; CHECK: # %bb.0: # %entry
4877 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
4878 ; CHECK-NEXT: vlseg2e32ff.v v6, (a0)
4879 ; CHECK-NEXT: csrr a0, vl
4880 ; CHECK-NEXT: sw a0, 0(a2)
4883 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg2ff.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %vl)
4884 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>, i32} %0, 1
4885 %2 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>, i32} %0, 2
4886 store i32 %2, ptr %outvl
4887 ret <vscale x 4 x float> %1
4890 define <vscale x 4 x float> @test_vlseg2ff_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
4891 ; CHECK-LABEL: test_vlseg2ff_mask_nxv4f32:
4892 ; CHECK: # %bb.0: # %entry
4893 ; CHECK-NEXT: vmv2r.v v6, v8
4894 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
4895 ; CHECK-NEXT: vlseg2e32ff.v v6, (a0), v0.t
4896 ; CHECK-NEXT: csrr a0, vl
4897 ; CHECK-NEXT: sw a0, 0(a2)
4900 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg2ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4901 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>, i32} %0, 1
4902 %2 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>, i32} %0, 2
4903 store i32 %2, ptr %outvl
4904 ret <vscale x 4 x float> %1
4907 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg3ff.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr , i32)
4908 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg3ff.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i1>, i32, i32)
4910 define <vscale x 4 x float> @test_vlseg3ff_nxv4f32(ptr %base, i32 %vl, ptr %outvl) {
4911 ; CHECK-LABEL: test_vlseg3ff_nxv4f32:
4912 ; CHECK: # %bb.0: # %entry
4913 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
4914 ; CHECK-NEXT: vlseg3e32ff.v v6, (a0)
4915 ; CHECK-NEXT: csrr a0, vl
4916 ; CHECK-NEXT: sw a0, 0(a2)
4919 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg3ff.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %vl)
4920 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} %0, 1
4921 %2 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} %0, 3
4922 store i32 %2, ptr %outvl
4923 ret <vscale x 4 x float> %1
4926 define <vscale x 4 x float> @test_vlseg3ff_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
4927 ; CHECK-LABEL: test_vlseg3ff_mask_nxv4f32:
4928 ; CHECK: # %bb.0: # %entry
4929 ; CHECK-NEXT: vmv2r.v v6, v8
4930 ; CHECK-NEXT: vmv2r.v v10, v8
4931 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
4932 ; CHECK-NEXT: vlseg3e32ff.v v6, (a0), v0.t
4933 ; CHECK-NEXT: csrr a0, vl
4934 ; CHECK-NEXT: sw a0, 0(a2)
4937 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg3ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4938 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} %0, 1
4939 %2 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} %0, 3
4940 store i32 %2, ptr %outvl
4941 ret <vscale x 4 x float> %1
4944 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg4ff.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr , i32)
4945 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg4ff.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, <vscale x 4 x i1>, i32, i32)
4947 define <vscale x 4 x float> @test_vlseg4ff_nxv4f32(ptr %base, i32 %vl, ptr %outvl) {
4948 ; CHECK-LABEL: test_vlseg4ff_nxv4f32:
4949 ; CHECK: # %bb.0: # %entry
4950 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
4951 ; CHECK-NEXT: vlseg4e32ff.v v6, (a0)
4952 ; CHECK-NEXT: csrr a0, vl
4953 ; CHECK-NEXT: sw a0, 0(a2)
4956 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg4ff.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %vl)
4957 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} %0, 1
4958 %2 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} %0, 4
4959 store i32 %2, ptr %outvl
4960 ret <vscale x 4 x float> %1
4963 define <vscale x 4 x float> @test_vlseg4ff_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
4964 ; CHECK-LABEL: test_vlseg4ff_mask_nxv4f32:
4965 ; CHECK: # %bb.0: # %entry
4966 ; CHECK-NEXT: vmv2r.v v6, v8
4967 ; CHECK-NEXT: vmv2r.v v10, v8
4968 ; CHECK-NEXT: vmv2r.v v12, v8
4969 ; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
4970 ; CHECK-NEXT: vlseg4e32ff.v v6, (a0), v0.t
4971 ; CHECK-NEXT: csrr a0, vl
4972 ; CHECK-NEXT: sw a0, 0(a2)
4975 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg4ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4976 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} %0, 1
4977 %2 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} %0, 4
4978 store i32 %2, ptr %outvl
4979 ret <vscale x 4 x float> %1