; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+zve64d,+f,+d,+zfh,+zvfh \
; RUN: -verify-machineinstrs < %s | FileCheck %s
declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, i32, i32)
declare {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i16>, ptr, i32, <vscale x 16 x i1>, i32, i32)

; Unmasked 2-field strided segment load of nxv16i16; only field 1 of the
; aggregate is returned.
define <vscale x 16 x i16> @test_vlsseg2_nxv16i16(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 1
  ret <vscale x 16 x i16> %1
}
; Masked vlsseg2 of nxv16i16: field 0 of an unmasked load feeds both tied
; passthru operands of the masked load (policy operand 1), hence the vmv4r.v
; copy between the two loads.
define <vscale x 16 x i16> @test_vlsseg2_mask_nxv16i16(ptr %base, i32 %offset, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1
; CHECK-NEXT:    vmv4r.v v8, v4
; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %0, 0
  %2 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.mask.nxv16i16(<vscale x 16 x i16> %1,<vscale x 16 x i16> %1, ptr %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
  %3 = extractvalue {<vscale x 16 x i16>,<vscale x 16 x i16>} %2, 1
  ret <vscale x 16 x i16> %3
}
declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i32, i32)
declare {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i32, <vscale x 1 x i1>, i32, i32)

; Unmasked vlsseg2 of nxv1i8; only field 1 of the aggregate is returned.
define <vscale x 1 x i8> @test_vlsseg2_nxv1i8(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
  ret <vscale x 1 x i8> %1
}
; Masked vlsseg2 of nxv1i8: field 0 of an unmasked load feeds both tied
; passthru operands of the masked load (policy operand 1).
define <vscale x 1 x i8> @test_vlsseg2_mask_nxv1i8(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
  ret <vscale x 1 x i8> %3
}
declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i32, i32)
declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i32, <vscale x 1 x i1>, i32, i32)

; Unmasked vlsseg3 of nxv1i8; only field 1 of the aggregate is returned.
define <vscale x 1 x i8> @test_vlsseg3_nxv1i8(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
  ret <vscale x 1 x i8> %1
}
; Masked vlsseg3 of nxv1i8: field 0 of an unmasked load feeds every tied
; passthru operand of the masked load (policy operand 1).
define <vscale x 1 x i8> @test_vlsseg3_mask_nxv1i8(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vmv1r.v v9, v7
; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
  ret <vscale x 1 x i8> %3
}
declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i32, i32)
declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i32, <vscale x 1 x i1>, i32, i32)

; Unmasked vlsseg4 of nxv1i8; only field 1 of the aggregate is returned.
define <vscale x 1 x i8> @test_vlsseg4_nxv1i8(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
  ret <vscale x 1 x i8> %1
}
; Masked vlsseg4 of nxv1i8: field 0 of an unmasked load feeds every tied
; passthru operand of the masked load (policy operand 1).
define <vscale x 1 x i8> @test_vlsseg4_mask_nxv1i8(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vmv1r.v v9, v7
; CHECK-NEXT:    vmv1r.v v10, v7
; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
  ret <vscale x 1 x i8> %3
}
declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i32, i32)
declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i32, <vscale x 1 x i1>, i32, i32)

; Unmasked vlsseg5 of nxv1i8; only field 1 of the aggregate is returned.
define <vscale x 1 x i8> @test_vlsseg5_nxv1i8(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg5_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
  ret <vscale x 1 x i8> %1
}
; Masked vlsseg5 of nxv1i8: field 0 of an unmasked load feeds every tied
; passthru operand of the masked load (policy operand 1).
define <vscale x 1 x i8> @test_vlsseg5_mask_nxv1i8(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg5_mask_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vmv1r.v v9, v7
; CHECK-NEXT:    vmv1r.v v10, v7
; CHECK-NEXT:    vmv1r.v v11, v7
; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
  ret <vscale x 1 x i8> %3
}
declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i32, i32)
declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i32, <vscale x 1 x i1>, i32, i32)

; Unmasked vlsseg6 of nxv1i8; only field 1 of the aggregate is returned.
define <vscale x 1 x i8> @test_vlsseg6_nxv1i8(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg6_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
  ret <vscale x 1 x i8> %1
}
; Masked vlsseg6 of nxv1i8: field 0 of an unmasked load feeds every tied
; passthru operand of the masked load (policy operand 1).
define <vscale x 1 x i8> @test_vlsseg6_mask_nxv1i8(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg6_mask_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vmv1r.v v9, v7
; CHECK-NEXT:    vmv1r.v v10, v7
; CHECK-NEXT:    vmv1r.v v11, v7
; CHECK-NEXT:    vmv1r.v v12, v7
; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
  ret <vscale x 1 x i8> %3
}
declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i32, i32)
declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i32, <vscale x 1 x i1>, i32, i32)

; Unmasked vlsseg7 of nxv1i8; only field 1 of the aggregate is returned.
define <vscale x 1 x i8> @test_vlsseg7_nxv1i8(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg7_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
  ret <vscale x 1 x i8> %1
}
; Masked vlsseg7 of nxv1i8: field 0 of an unmasked load feeds every tied
; passthru operand of the masked load (policy operand 1).
define <vscale x 1 x i8> @test_vlsseg7_mask_nxv1i8(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg7_mask_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vmv1r.v v9, v7
; CHECK-NEXT:    vmv1r.v v10, v7
; CHECK-NEXT:    vmv1r.v v11, v7
; CHECK-NEXT:    vmv1r.v v12, v7
; CHECK-NEXT:    vmv1r.v v13, v7
; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
  ret <vscale x 1 x i8> %3
}
declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i32, i32)
declare {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, ptr, i32, <vscale x 1 x i1>, i32, i32)

; Unmasked vlsseg8 of nxv1i8; only field 1 of the aggregate is returned.
define <vscale x 1 x i8> @test_vlsseg8_nxv1i8(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 1
  ret <vscale x 1 x i8> %1
}
; Masked vlsseg8 of nxv1i8: field 0 of an unmasked load feeds every tied
; passthru operand of the masked load (policy operand 1).
define <vscale x 1 x i8> @test_vlsseg8_mask_nxv1i8(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg8_mask_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, mu
; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vmv1r.v v9, v7
; CHECK-NEXT:    vmv1r.v v10, v7
; CHECK-NEXT:    vmv1r.v v11, v7
; CHECK-NEXT:    vmv1r.v v12, v7
; CHECK-NEXT:    vmv1r.v v13, v7
; CHECK-NEXT:    vmv1r.v v14, v7
; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %0, 0
  %2 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.mask.nxv1i8(<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1,<vscale x 1 x i8> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
  %3 = extractvalue {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} %2, 1
  ret <vscale x 1 x i8> %3
}
declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, i32, i32)
declare {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, i32, <vscale x 16 x i1>, i32, i32)

; Unmasked vlsseg2 of nxv16i8; only field 1 of the aggregate is returned.
define <vscale x 16 x i8> @test_vlsseg2_nxv16i8(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vlsseg2e8.v v6, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
  ret <vscale x 16 x i8> %1
}
; Masked vlsseg2 of nxv16i8: field 0 of an unmasked load feeds both tied
; passthru operands of the masked load (policy operand 1).
define <vscale x 16 x i8> @test_vlsseg2_mask_nxv16i8(ptr %base, i32 %offset, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT:    vlsseg2e8.v v6, (a0), a1
; CHECK-NEXT:    vmv2r.v v8, v6
; CHECK-NEXT:    vlsseg2e8.v v6, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, ptr %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
  ret <vscale x 16 x i8> %3
}
declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, i32, i32)
declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, i32, <vscale x 16 x i1>, i32, i32)

; Unmasked vlsseg3 of nxv16i8; only field 1 of the aggregate is returned.
define <vscale x 16 x i8> @test_vlsseg3_nxv16i8(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vlsseg3e8.v v6, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
  ret <vscale x 16 x i8> %1
}
; Masked vlsseg3 of nxv16i8: field 0 of an unmasked load feeds every tied
; passthru operand of the masked load (policy operand 1).
define <vscale x 16 x i8> @test_vlsseg3_mask_nxv16i8(ptr %base, i32 %offset, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT:    vlsseg3e8.v v6, (a0), a1
; CHECK-NEXT:    vmv2r.v v8, v6
; CHECK-NEXT:    vmv2r.v v10, v6
; CHECK-NEXT:    vlsseg3e8.v v6, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, ptr %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
  ret <vscale x 16 x i8> %3
}
declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, i32, i32)
declare {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, ptr, i32, <vscale x 16 x i1>, i32, i32)

; Unmasked vlsseg4 of nxv16i8; only field 1 of the aggregate is returned.
define <vscale x 16 x i8> @test_vlsseg4_nxv16i8(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT:    vlsseg4e8.v v6, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 1
  ret <vscale x 16 x i8> %1
}
; Masked vlsseg4 of nxv16i8: field 0 of an unmasked load feeds every tied
; passthru operand of the masked load (policy operand 1).
define <vscale x 16 x i8> @test_vlsseg4_mask_nxv16i8(ptr %base, i32 %offset, i32 %vl, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT:    vlsseg4e8.v v6, (a0), a1
; CHECK-NEXT:    vmv2r.v v8, v6
; CHECK-NEXT:    vmv2r.v v10, v6
; CHECK-NEXT:    vmv2r.v v12, v6
; CHECK-NEXT:    vlsseg4e8.v v6, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %0, 0
  %2 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.mask.nxv16i8(<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1,<vscale x 16 x i8> %1, ptr %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
  %3 = extractvalue {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} %2, 1
  ret <vscale x 16 x i8> %3
}
declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i32, i32)
declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i32, <vscale x 2 x i1>, i32, i32)

; Unmasked vlsseg2 of nxv2i32; only field 1 of the aggregate is returned.
define <vscale x 2 x i32> @test_vlsseg2_nxv2i32(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
  ret <vscale x 2 x i32> %1
}
; Masked vlsseg2 of nxv2i32: field 0 of an unmasked load feeds both tied
; passthru operands of the masked load (policy operand 1).
define <vscale x 2 x i32> @test_vlsseg2_mask_nxv2i32(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
  ret <vscale x 2 x i32> %3
}
declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i32, i32)
declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i32, <vscale x 2 x i1>, i32, i32)

; Unmasked vlsseg3 of nxv2i32; only field 1 of the aggregate is returned.
define <vscale x 2 x i32> @test_vlsseg3_nxv2i32(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
  ret <vscale x 2 x i32> %1
}
; Masked vlsseg3 of nxv2i32: field 0 of an unmasked load feeds every tied
; passthru operand of the masked load (policy operand 1).
define <vscale x 2 x i32> @test_vlsseg3_mask_nxv2i32(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vmv1r.v v9, v7
; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
  ret <vscale x 2 x i32> %3
}
declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i32, i32)
declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i32, <vscale x 2 x i1>, i32, i32)

; Unmasked vlsseg4 of nxv2i32; only field 1 of the aggregate is returned.
define <vscale x 2 x i32> @test_vlsseg4_nxv2i32(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
  ret <vscale x 2 x i32> %1
}
; Masked vlsseg4 of nxv2i32: field 0 of an unmasked load feeds every tied
; passthru operand of the masked load (policy operand 1).
define <vscale x 2 x i32> @test_vlsseg4_mask_nxv2i32(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, mu
; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1
; CHECK-NEXT:    vmv1r.v v8, v7
; CHECK-NEXT:    vmv1r.v v9, v7
; CHECK-NEXT:    vmv1r.v v10, v7
; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
  %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
  %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
  ret <vscale x 2 x i32> %3
}
declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i32, i32)
declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i32, <vscale x 2 x i1>, i32, i32)

; Unmasked vlsseg5 of nxv2i32; only field 1 of the aggregate is returned.
define <vscale x 2 x i32> @test_vlsseg5_nxv2i32(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg5_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1
; CHECK-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
  ret <vscale x 2 x i32> %1
}
481 define <vscale x 2 x i32> @test_vlsseg5_mask_nxv2i32(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
482 ; CHECK-LABEL: test_vlsseg5_mask_nxv2i32:
483 ; CHECK: # %bb.0: # %entry
484 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
485 ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
486 ; CHECK-NEXT: vmv1r.v v8, v7
487 ; CHECK-NEXT: vmv1r.v v9, v7
488 ; CHECK-NEXT: vmv1r.v v10, v7
489 ; CHECK-NEXT: vmv1r.v v11, v7
490 ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t
493 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
494 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
495 %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
496 %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
497 ret <vscale x 2 x i32> %3
500 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i32, i32)
501 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i32, <vscale x 2 x i1>, i32, i32)
503 define <vscale x 2 x i32> @test_vlsseg6_nxv2i32(ptr %base, i32 %offset, i32 %vl) {
504 ; CHECK-LABEL: test_vlsseg6_nxv2i32:
505 ; CHECK: # %bb.0: # %entry
506 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
507 ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
510 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
511 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
512 ret <vscale x 2 x i32> %1
515 define <vscale x 2 x i32> @test_vlsseg6_mask_nxv2i32(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
516 ; CHECK-LABEL: test_vlsseg6_mask_nxv2i32:
517 ; CHECK: # %bb.0: # %entry
518 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
519 ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
520 ; CHECK-NEXT: vmv1r.v v8, v7
521 ; CHECK-NEXT: vmv1r.v v9, v7
522 ; CHECK-NEXT: vmv1r.v v10, v7
523 ; CHECK-NEXT: vmv1r.v v11, v7
524 ; CHECK-NEXT: vmv1r.v v12, v7
525 ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t
528 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
529 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
530 %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
531 %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
532 ret <vscale x 2 x i32> %3
535 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i32, i32)
536 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i32, <vscale x 2 x i1>, i32, i32)
538 define <vscale x 2 x i32> @test_vlsseg7_nxv2i32(ptr %base, i32 %offset, i32 %vl) {
539 ; CHECK-LABEL: test_vlsseg7_nxv2i32:
540 ; CHECK: # %bb.0: # %entry
541 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
542 ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
545 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
546 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
547 ret <vscale x 2 x i32> %1
550 define <vscale x 2 x i32> @test_vlsseg7_mask_nxv2i32(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
551 ; CHECK-LABEL: test_vlsseg7_mask_nxv2i32:
552 ; CHECK: # %bb.0: # %entry
553 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
554 ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
555 ; CHECK-NEXT: vmv1r.v v8, v7
556 ; CHECK-NEXT: vmv1r.v v9, v7
557 ; CHECK-NEXT: vmv1r.v v10, v7
558 ; CHECK-NEXT: vmv1r.v v11, v7
559 ; CHECK-NEXT: vmv1r.v v12, v7
560 ; CHECK-NEXT: vmv1r.v v13, v7
561 ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t
564 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
565 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
566 %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
567 %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
568 ret <vscale x 2 x i32> %3
571 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i32, i32)
572 declare {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, ptr, i32, <vscale x 2 x i1>, i32, i32)
574 define <vscale x 2 x i32> @test_vlsseg8_nxv2i32(ptr %base, i32 %offset, i32 %vl) {
575 ; CHECK-LABEL: test_vlsseg8_nxv2i32:
576 ; CHECK: # %bb.0: # %entry
577 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
578 ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
581 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
582 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
583 ret <vscale x 2 x i32> %1
586 define <vscale x 2 x i32> @test_vlsseg8_mask_nxv2i32(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
587 ; CHECK-LABEL: test_vlsseg8_mask_nxv2i32:
588 ; CHECK: # %bb.0: # %entry
589 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
590 ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
591 ; CHECK-NEXT: vmv1r.v v8, v7
592 ; CHECK-NEXT: vmv1r.v v9, v7
593 ; CHECK-NEXT: vmv1r.v v10, v7
594 ; CHECK-NEXT: vmv1r.v v11, v7
595 ; CHECK-NEXT: vmv1r.v v12, v7
596 ; CHECK-NEXT: vmv1r.v v13, v7
597 ; CHECK-NEXT: vmv1r.v v14, v7
598 ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t
601 %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
602 %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 0
603 %2 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.mask.nxv2i32(<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1,<vscale x 2 x i32> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
604 %3 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} %2, 1
605 ret <vscale x 2 x i32> %3
; --- nxv4i16 strided segment loads (vlsseg2..vlsseg8) ---
; Same pattern as the nxv2i32 section: each segment width has an unmasked
; test (all-undef passthru, returns field 1) and a masked test (field 0 of
; an unmasked load reused as passthru for every field, trailing i32 1 is
; presumably the policy operand — confirm against the RVV intrinsic spec).
; NOTE(review): CHECK lines are autogenerated by update_llc_test_checks.py;
; regenerate instead of hand-editing.
;
; 2-field declarations (plain and masked).
608 declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i32, i32)
609 declare {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i32, <vscale x 4 x i1>, i32, i32)
; Unmasked vlsseg2: returns field 1.
611 define <vscale x 4 x i16> @test_vlsseg2_nxv4i16(ptr %base, i32 %offset, i32 %vl) {
612 ; CHECK-LABEL: test_vlsseg2_nxv4i16:
613 ; CHECK: # %bb.0: # %entry
614 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
615 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
618 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
619 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
620 ret <vscale x 4 x i16> %1
; Masked vlsseg2: ties field 0 of an unmasked load in as passthru.
623 define <vscale x 4 x i16> @test_vlsseg2_mask_nxv4i16(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
624 ; CHECK-LABEL: test_vlsseg2_mask_nxv4i16:
625 ; CHECK: # %bb.0: # %entry
626 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
627 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
628 ; CHECK-NEXT: vmv1r.v v8, v7
629 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
632 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
633 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
634 %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
635 %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
636 ret <vscale x 4 x i16> %3
; 3-field declarations (plain and masked).
639 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i32, i32)
640 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i32, <vscale x 4 x i1>, i32, i32)
; Unmasked vlsseg3: returns field 1.
642 define <vscale x 4 x i16> @test_vlsseg3_nxv4i16(ptr %base, i32 %offset, i32 %vl) {
643 ; CHECK-LABEL: test_vlsseg3_nxv4i16:
644 ; CHECK: # %bb.0: # %entry
645 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
646 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
649 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
650 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
651 ret <vscale x 4 x i16> %1
; Masked vlsseg3: ties field 0 of an unmasked load in as passthru.
654 define <vscale x 4 x i16> @test_vlsseg3_mask_nxv4i16(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
655 ; CHECK-LABEL: test_vlsseg3_mask_nxv4i16:
656 ; CHECK: # %bb.0: # %entry
657 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
658 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
659 ; CHECK-NEXT: vmv1r.v v8, v7
660 ; CHECK-NEXT: vmv1r.v v9, v7
661 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
664 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
665 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
666 %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
667 %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
668 ret <vscale x 4 x i16> %3
; 4-field declarations (plain and masked).
671 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i32, i32)
672 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i32, <vscale x 4 x i1>, i32, i32)
; Unmasked vlsseg4: returns field 1.
674 define <vscale x 4 x i16> @test_vlsseg4_nxv4i16(ptr %base, i32 %offset, i32 %vl) {
675 ; CHECK-LABEL: test_vlsseg4_nxv4i16:
676 ; CHECK: # %bb.0: # %entry
677 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
678 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
681 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
682 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
683 ret <vscale x 4 x i16> %1
; Masked vlsseg4: ties field 0 of an unmasked load in as passthru.
686 define <vscale x 4 x i16> @test_vlsseg4_mask_nxv4i16(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
687 ; CHECK-LABEL: test_vlsseg4_mask_nxv4i16:
688 ; CHECK: # %bb.0: # %entry
689 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
690 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
691 ; CHECK-NEXT: vmv1r.v v8, v7
692 ; CHECK-NEXT: vmv1r.v v9, v7
693 ; CHECK-NEXT: vmv1r.v v10, v7
694 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
697 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
698 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
699 %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
700 %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
701 ret <vscale x 4 x i16> %3
; 5-field declarations (plain and masked).
704 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i32, i32)
705 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i32, <vscale x 4 x i1>, i32, i32)
; Unmasked vlsseg5: returns field 1.
707 define <vscale x 4 x i16> @test_vlsseg5_nxv4i16(ptr %base, i32 %offset, i32 %vl) {
708 ; CHECK-LABEL: test_vlsseg5_nxv4i16:
709 ; CHECK: # %bb.0: # %entry
710 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
711 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
714 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
715 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
716 ret <vscale x 4 x i16> %1
; Masked vlsseg5: ties field 0 of an unmasked load in as passthru.
719 define <vscale x 4 x i16> @test_vlsseg5_mask_nxv4i16(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
720 ; CHECK-LABEL: test_vlsseg5_mask_nxv4i16:
721 ; CHECK: # %bb.0: # %entry
722 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
723 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
724 ; CHECK-NEXT: vmv1r.v v8, v7
725 ; CHECK-NEXT: vmv1r.v v9, v7
726 ; CHECK-NEXT: vmv1r.v v10, v7
727 ; CHECK-NEXT: vmv1r.v v11, v7
728 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
731 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
732 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
733 %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
734 %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
735 ret <vscale x 4 x i16> %3
; 6-field declarations (plain and masked).
738 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i32, i32)
739 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i32, <vscale x 4 x i1>, i32, i32)
; Unmasked vlsseg6: returns field 1.
741 define <vscale x 4 x i16> @test_vlsseg6_nxv4i16(ptr %base, i32 %offset, i32 %vl) {
742 ; CHECK-LABEL: test_vlsseg6_nxv4i16:
743 ; CHECK: # %bb.0: # %entry
744 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
745 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
748 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
749 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
750 ret <vscale x 4 x i16> %1
; Masked vlsseg6: ties field 0 of an unmasked load in as passthru.
753 define <vscale x 4 x i16> @test_vlsseg6_mask_nxv4i16(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
754 ; CHECK-LABEL: test_vlsseg6_mask_nxv4i16:
755 ; CHECK: # %bb.0: # %entry
756 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
757 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
758 ; CHECK-NEXT: vmv1r.v v8, v7
759 ; CHECK-NEXT: vmv1r.v v9, v7
760 ; CHECK-NEXT: vmv1r.v v10, v7
761 ; CHECK-NEXT: vmv1r.v v11, v7
762 ; CHECK-NEXT: vmv1r.v v12, v7
763 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
766 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
767 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
768 %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
769 %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
770 ret <vscale x 4 x i16> %3
; 7-field declarations (plain and masked).
773 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i32, i32)
774 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i32, <vscale x 4 x i1>, i32, i32)
; Unmasked vlsseg7: returns field 1.
776 define <vscale x 4 x i16> @test_vlsseg7_nxv4i16(ptr %base, i32 %offset, i32 %vl) {
777 ; CHECK-LABEL: test_vlsseg7_nxv4i16:
778 ; CHECK: # %bb.0: # %entry
779 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
780 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
783 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
784 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
785 ret <vscale x 4 x i16> %1
; Masked vlsseg7: ties field 0 of an unmasked load in as passthru.
788 define <vscale x 4 x i16> @test_vlsseg7_mask_nxv4i16(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
789 ; CHECK-LABEL: test_vlsseg7_mask_nxv4i16:
790 ; CHECK: # %bb.0: # %entry
791 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
792 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
793 ; CHECK-NEXT: vmv1r.v v8, v7
794 ; CHECK-NEXT: vmv1r.v v9, v7
795 ; CHECK-NEXT: vmv1r.v v10, v7
796 ; CHECK-NEXT: vmv1r.v v11, v7
797 ; CHECK-NEXT: vmv1r.v v12, v7
798 ; CHECK-NEXT: vmv1r.v v13, v7
799 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
802 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
803 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
804 %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
805 %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
806 ret <vscale x 4 x i16> %3
; 8-field declarations (plain and masked).
809 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i32, i32)
810 declare {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, ptr, i32, <vscale x 4 x i1>, i32, i32)
; Unmasked vlsseg8: returns field 1.
812 define <vscale x 4 x i16> @test_vlsseg8_nxv4i16(ptr %base, i32 %offset, i32 %vl) {
813 ; CHECK-LABEL: test_vlsseg8_nxv4i16:
814 ; CHECK: # %bb.0: # %entry
815 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
816 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
819 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
820 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 1
821 ret <vscale x 4 x i16> %1
; Masked vlsseg8: ties field 0 of an unmasked load in as passthru.
824 define <vscale x 4 x i16> @test_vlsseg8_mask_nxv4i16(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
825 ; CHECK-LABEL: test_vlsseg8_mask_nxv4i16:
826 ; CHECK: # %bb.0: # %entry
827 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
828 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
829 ; CHECK-NEXT: vmv1r.v v8, v7
830 ; CHECK-NEXT: vmv1r.v v9, v7
831 ; CHECK-NEXT: vmv1r.v v10, v7
832 ; CHECK-NEXT: vmv1r.v v11, v7
833 ; CHECK-NEXT: vmv1r.v v12, v7
834 ; CHECK-NEXT: vmv1r.v v13, v7
835 ; CHECK-NEXT: vmv1r.v v14, v7
836 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
839 %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
840 %1 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %0, 0
841 %2 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.mask.nxv4i16(<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1,<vscale x 4 x i16> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
842 %3 = extractvalue {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} %2, 1
843 ret <vscale x 4 x i16> %3
846 declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i32, i32)
847 declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i32, <vscale x 1 x i1>, i32, i32)
849 define <vscale x 1 x i32> @test_vlsseg2_nxv1i32(ptr %base, i32 %offset, i32 %vl) {
850 ; CHECK-LABEL: test_vlsseg2_nxv1i32:
851 ; CHECK: # %bb.0: # %entry
852 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
853 ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
856 %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
857 %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
858 ret <vscale x 1 x i32> %1
; Masked vlsseg2: an unmasked load materializes the passthru tuple, which is
; copied across the destination group (one vmv1r.v) before the masked (v0.t)
; reload; the trailing i32 1 policy operand corresponds to the ta, mu vsetvli.
define <vscale x 1 x i32> @test_vlsseg2_mask_nxv1i32(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
%2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
ret <vscale x 1 x i32> %3
877 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i32, i32)
878 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked strided 3-field segment load of nxv1i32 (e32/mf2), undef passthru;
; returns field 1 of the tuple.
define <vscale x 1 x i32> @test_vlsseg3_nxv1i32(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
ret <vscale x 1 x i32> %1
; Masked vlsseg3: passthru comes from the unmasked load; two vmv1r.v copies
; replicate field 0 into the remaining destination registers before the
; masked (v0.t, mu) reload with policy operand i32 1.
define <vscale x 1 x i32> @test_vlsseg3_mask_nxv1i32(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
%2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
ret <vscale x 1 x i32> %3
909 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i32, i32)
910 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked strided 4-field segment load of nxv1i32 (e32/mf2), undef passthru;
; returns field 1 of the tuple.
define <vscale x 1 x i32> @test_vlsseg4_nxv1i32(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
ret <vscale x 1 x i32> %1
; Masked vlsseg4: three vmv1r.v copies fan field 0 of the unmasked load out
; as the passthru group before the masked (v0.t, mu) reload, policy i32 1.
define <vscale x 1 x i32> @test_vlsseg4_mask_nxv1i32(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
%2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
ret <vscale x 1 x i32> %3
942 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i32, i32)
943 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked strided 5-field segment load of nxv1i32 (e32/mf2), undef passthru;
; returns field 1 of the tuple.
define <vscale x 1 x i32> @test_vlsseg5_nxv1i32(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg5_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
ret <vscale x 1 x i32> %1
; Masked vlsseg5: four vmv1r.v copies replicate the passthru before the
; masked (v0.t, mu) reload, policy operand i32 1.
define <vscale x 1 x i32> @test_vlsseg5_mask_nxv1i32(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg5_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
%2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
ret <vscale x 1 x i32> %3
976 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i32, i32)
977 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked strided 6-field segment load of nxv1i32 (e32/mf2), undef passthru;
; returns field 1 of the tuple.
define <vscale x 1 x i32> @test_vlsseg6_nxv1i32(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg6_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
ret <vscale x 1 x i32> %1
; Masked vlsseg6: five vmv1r.v copies replicate the passthru before the
; masked (v0.t, mu) reload, policy operand i32 1.
define <vscale x 1 x i32> @test_vlsseg6_mask_nxv1i32(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg6_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
%2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
ret <vscale x 1 x i32> %3
1011 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i32, i32)
1012 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked strided 7-field segment load of nxv1i32 (e32/mf2), undef passthru;
; returns field 1 of the tuple.
define <vscale x 1 x i32> @test_vlsseg7_nxv1i32(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg7_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
ret <vscale x 1 x i32> %1
; Masked vlsseg7: six vmv1r.v copies replicate the passthru before the
; masked (v0.t, mu) reload, policy operand i32 1.
define <vscale x 1 x i32> @test_vlsseg7_mask_nxv1i32(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg7_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
%2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
ret <vscale x 1 x i32> %3
1047 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i32, i32)
1048 declare {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked strided 8-field segment load of nxv1i32 (e32/mf2), undef passthru;
; returns field 1 of the tuple. (Irregular spacing around the undef operands
; is preserved from the original test input.)
define <vscale x 1 x i32> @test_vlsseg8_nxv1i32(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg8_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
ret <vscale x 1 x i32> %1
; Masked vlsseg8: seven vmv1r.v copies replicate the passthru before the
; masked (v0.t, mu) reload, policy operand i32 1. (Irregular spacing around
; the undef operands is preserved from the original test input.)
define <vscale x 1 x i32> @test_vlsseg8_mask_nxv1i32(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
; CHECK-LABEL: test_vlsseg8_mask_nxv1i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vmv1r.v v12, v7
; CHECK-NEXT: vmv1r.v v13, v7
; CHECK-NEXT: vmv1r.v v14, v7
; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t
%0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 0
%2 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.mask.nxv1i32(<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1,<vscale x 1 x i32> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
%3 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} %2, 1
ret <vscale x 1 x i32> %3
1084 declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, i32, i32)
1085 declare {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, i32, <vscale x 8 x i1>, i32, i32)
; Unmasked strided 2-field segment load of nxv8i16 (e16/m2), undef passthru;
; returns field 1 of the tuple. LMUL=2 so the destination group starts at v6.
define <vscale x 8 x i16> @test_vlsseg2_nxv8i16(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
ret <vscale x 8 x i16> %1
; Masked vlsseg2 at LMUL=2: whole-register-group copy uses vmv2r.v before the
; masked (v0.t, mu) reload, policy operand i32 1.
define <vscale x 8 x i16> @test_vlsseg2_mask_nxv8i16(ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
%2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.mask.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
%3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
ret <vscale x 8 x i16> %3
1115 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, i32, i32)
1116 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, i32, <vscale x 8 x i1>, i32, i32)
; Unmasked strided 3-field segment load of nxv8i16 (e16/m2), undef passthru;
; returns field 1 of the tuple.
define <vscale x 8 x i16> @test_vlsseg3_nxv8i16(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
ret <vscale x 8 x i16> %1
; Masked vlsseg3 at LMUL=2: two vmv2r.v copies replicate the passthru group
; before the masked (v0.t, mu) reload, policy operand i32 1.
define <vscale x 8 x i16> @test_vlsseg3_mask_nxv8i16(ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
%2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.mask.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
%3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
ret <vscale x 8 x i16> %3
1147 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, i32, i32)
1148 declare {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, ptr, i32, <vscale x 8 x i1>, i32, i32)
; Unmasked strided 4-field segment load of nxv8i16 (e16/m2), undef passthru;
; returns field 1 of the tuple.
define <vscale x 8 x i16> @test_vlsseg4_nxv8i16(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 1
ret <vscale x 8 x i16> %1
; Masked vlsseg4 at LMUL=2: three vmv2r.v copies replicate the passthru group
; before the masked (v0.t, mu) reload, policy operand i32 1.
define <vscale x 8 x i16> @test_vlsseg4_mask_nxv8i16(ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv8i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1
; CHECK-NEXT: vmv2r.v v8, v6
; CHECK-NEXT: vmv2r.v v10, v6
; CHECK-NEXT: vmv2r.v v12, v6
; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t
%0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %0, 0
%2 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.mask.nxv8i16(<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1,<vscale x 8 x i16> %1, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
%3 = extractvalue {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} %2, 1
ret <vscale x 8 x i16> %3
1180 declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i32, i32)
1181 declare {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i32, <vscale x 8 x i1>, i32, i32)
; Unmasked strided 2-field segment load of nxv8i8 (e8/m1), undef passthru;
; returns field 1 of the tuple.
define <vscale x 8 x i8> @test_vlsseg2_nxv8i8(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg2_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
ret <vscale x 8 x i8> %1
; Masked vlsseg2 at e8/m1: one vmv1r.v replicates the passthru before the
; masked (v0.t, mu) reload, policy operand i32 1.
define <vscale x 8 x i8> @test_vlsseg2_mask_nxv8i8(ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
%2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
%3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
ret <vscale x 8 x i8> %3
1211 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i32, i32)
1212 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i32, <vscale x 8 x i1>, i32, i32)
; Unmasked strided 3-field segment load of nxv8i8 (e8/m1), undef passthru;
; returns field 1 of the tuple.
define <vscale x 8 x i8> @test_vlsseg3_nxv8i8(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg3_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
ret <vscale x 8 x i8> %1
; Masked vlsseg3 at e8/m1: two vmv1r.v copies replicate the passthru before
; the masked (v0.t, mu) reload, policy operand i32 1.
define <vscale x 8 x i8> @test_vlsseg3_mask_nxv8i8(ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vlsseg3_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
%2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
%3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
ret <vscale x 8 x i8> %3
1243 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i32, i32)
1244 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i32, <vscale x 8 x i1>, i32, i32)
; Unmasked strided 4-field segment load of nxv8i8 (e8/m1), undef passthru;
; returns field 1 of the tuple.
define <vscale x 8 x i8> @test_vlsseg4_nxv8i8(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg4_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
ret <vscale x 8 x i8> %1
; Masked vlsseg4 at e8/m1: three vmv1r.v copies replicate the passthru before
; the masked (v0.t, mu) reload, policy operand i32 1.
define <vscale x 8 x i8> @test_vlsseg4_mask_nxv8i8(ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vlsseg4_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
%2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
%3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
ret <vscale x 8 x i8> %3
1276 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i32, i32)
1277 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i32, <vscale x 8 x i1>, i32, i32)
; Unmasked strided 5-field segment load of nxv8i8 (e8/m1), undef passthru;
; returns field 1 of the tuple.
define <vscale x 8 x i8> @test_vlsseg5_nxv8i8(ptr %base, i32 %offset, i32 %vl) {
; CHECK-LABEL: test_vlsseg5_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
ret <vscale x 8 x i8> %1
; Masked vlsseg5 at e8/m1: four vmv1r.v copies replicate the passthru before
; the masked (v0.t, mu) reload, policy operand i32 1.
define <vscale x 8 x i8> @test_vlsseg5_mask_nxv8i8(ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: test_vlsseg5_mask_nxv8i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
; CHECK-NEXT: vmv1r.v v8, v7
; CHECK-NEXT: vmv1r.v v9, v7
; CHECK-NEXT: vmv1r.v v10, v7
; CHECK-NEXT: vmv1r.v v11, v7
; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
%0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
%1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
%2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
%3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
ret <vscale x 8 x i8> %3
1310 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i32, i32)
1311 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i32, <vscale x 8 x i1>, i32, i32)
1313 define <vscale x 8 x i8> @test_vlsseg6_nxv8i8(ptr %base, i32 %offset, i32 %vl) {
1314 ; CHECK-LABEL: test_vlsseg6_nxv8i8:
1315 ; CHECK: # %bb.0: # %entry
1316 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
1317 ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
1320 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1321 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
1322 ret <vscale x 8 x i8> %1
1325 define <vscale x 8 x i8> @test_vlsseg6_mask_nxv8i8(ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
1326 ; CHECK-LABEL: test_vlsseg6_mask_nxv8i8:
1327 ; CHECK: # %bb.0: # %entry
1328 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
1329 ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
1330 ; CHECK-NEXT: vmv1r.v v8, v7
1331 ; CHECK-NEXT: vmv1r.v v9, v7
1332 ; CHECK-NEXT: vmv1r.v v10, v7
1333 ; CHECK-NEXT: vmv1r.v v11, v7
1334 ; CHECK-NEXT: vmv1r.v v12, v7
1335 ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t
1338 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1339 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
1340 %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
1341 %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
1342 ret <vscale x 8 x i8> %3
1345 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i32, i32)
1346 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i32, <vscale x 8 x i1>, i32, i32)
1348 define <vscale x 8 x i8> @test_vlsseg7_nxv8i8(ptr %base, i32 %offset, i32 %vl) {
1349 ; CHECK-LABEL: test_vlsseg7_nxv8i8:
1350 ; CHECK: # %bb.0: # %entry
1351 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
1352 ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
1355 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1356 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
1357 ret <vscale x 8 x i8> %1
1360 define <vscale x 8 x i8> @test_vlsseg7_mask_nxv8i8(ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
1361 ; CHECK-LABEL: test_vlsseg7_mask_nxv8i8:
1362 ; CHECK: # %bb.0: # %entry
1363 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
1364 ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
1365 ; CHECK-NEXT: vmv1r.v v8, v7
1366 ; CHECK-NEXT: vmv1r.v v9, v7
1367 ; CHECK-NEXT: vmv1r.v v10, v7
1368 ; CHECK-NEXT: vmv1r.v v11, v7
1369 ; CHECK-NEXT: vmv1r.v v12, v7
1370 ; CHECK-NEXT: vmv1r.v v13, v7
1371 ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
1374 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1375 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
1376 %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
1377 %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
1378 ret <vscale x 8 x i8> %3
1381 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i32, i32)
1382 declare {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, ptr, i32, <vscale x 8 x i1>, i32, i32)
1384 define <vscale x 8 x i8> @test_vlsseg8_nxv8i8(ptr %base, i32 %offset, i32 %vl) {
1385 ; CHECK-LABEL: test_vlsseg8_nxv8i8:
1386 ; CHECK: # %bb.0: # %entry
1387 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
1388 ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
1391 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1392 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 1
1393 ret <vscale x 8 x i8> %1
1396 define <vscale x 8 x i8> @test_vlsseg8_mask_nxv8i8(ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
1397 ; CHECK-LABEL: test_vlsseg8_mask_nxv8i8:
1398 ; CHECK: # %bb.0: # %entry
1399 ; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, mu
1400 ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
1401 ; CHECK-NEXT: vmv1r.v v8, v7
1402 ; CHECK-NEXT: vmv1r.v v9, v7
1403 ; CHECK-NEXT: vmv1r.v v10, v7
1404 ; CHECK-NEXT: vmv1r.v v11, v7
1405 ; CHECK-NEXT: vmv1r.v v12, v7
1406 ; CHECK-NEXT: vmv1r.v v13, v7
1407 ; CHECK-NEXT: vmv1r.v v14, v7
1408 ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t
1411 %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1412 %1 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %0, 0
1413 %2 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.mask.nxv8i8(<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1,<vscale x 8 x i8> %1, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
1414 %3 = extractvalue {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} %2, 1
1415 ret <vscale x 8 x i8> %3
1418 declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr, i32, i32)
1419 declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr, i32, <vscale x 8 x i1>, i32, i32)
1421 define <vscale x 8 x i32> @test_vlsseg2_nxv8i32(ptr %base, i32 %offset, i32 %vl) {
1422 ; CHECK-LABEL: test_vlsseg2_nxv8i32:
1423 ; CHECK: # %bb.0: # %entry
1424 ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
1425 ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
1428 %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i32 %offset, i32 %vl)
1429 %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
1430 ret <vscale x 8 x i32> %1
1433 define <vscale x 8 x i32> @test_vlsseg2_mask_nxv8i32(ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
1434 ; CHECK-LABEL: test_vlsseg2_mask_nxv8i32:
1435 ; CHECK: # %bb.0: # %entry
1436 ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
1437 ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
1438 ; CHECK-NEXT: vmv4r.v v8, v4
1439 ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t
1442 %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i32 %offset, i32 %vl)
1443 %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 0
1444 %2 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.mask.nxv8i32(<vscale x 8 x i32> %1,<vscale x 8 x i32> %1, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
1445 %3 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %2, 1
1446 ret <vscale x 8 x i32> %3
1449 declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i32, i32)
1450 declare {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i32, <vscale x 4 x i1>, i32, i32)
1452 define <vscale x 4 x i8> @test_vlsseg2_nxv4i8(ptr %base, i32 %offset, i32 %vl) {
1453 ; CHECK-LABEL: test_vlsseg2_nxv4i8:
1454 ; CHECK: # %bb.0: # %entry
1455 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
1456 ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
1459 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1460 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
1461 ret <vscale x 4 x i8> %1
1464 define <vscale x 4 x i8> @test_vlsseg2_mask_nxv4i8(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
1465 ; CHECK-LABEL: test_vlsseg2_mask_nxv4i8:
1466 ; CHECK: # %bb.0: # %entry
1467 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
1468 ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
1469 ; CHECK-NEXT: vmv1r.v v8, v7
1470 ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
1473 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1474 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
1475 %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
1476 %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
1477 ret <vscale x 4 x i8> %3
1480 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i32, i32)
1481 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i32, <vscale x 4 x i1>, i32, i32)
1483 define <vscale x 4 x i8> @test_vlsseg3_nxv4i8(ptr %base, i32 %offset, i32 %vl) {
1484 ; CHECK-LABEL: test_vlsseg3_nxv4i8:
1485 ; CHECK: # %bb.0: # %entry
1486 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
1487 ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
1490 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1491 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
1492 ret <vscale x 4 x i8> %1
1495 define <vscale x 4 x i8> @test_vlsseg3_mask_nxv4i8(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
1496 ; CHECK-LABEL: test_vlsseg3_mask_nxv4i8:
1497 ; CHECK: # %bb.0: # %entry
1498 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
1499 ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
1500 ; CHECK-NEXT: vmv1r.v v8, v7
1501 ; CHECK-NEXT: vmv1r.v v9, v7
1502 ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
1505 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1506 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
1507 %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
1508 %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
1509 ret <vscale x 4 x i8> %3
1512 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i32, i32)
1513 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i32, <vscale x 4 x i1>, i32, i32)
1515 define <vscale x 4 x i8> @test_vlsseg4_nxv4i8(ptr %base, i32 %offset, i32 %vl) {
1516 ; CHECK-LABEL: test_vlsseg4_nxv4i8:
1517 ; CHECK: # %bb.0: # %entry
1518 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
1519 ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
1522 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1523 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
1524 ret <vscale x 4 x i8> %1
1527 define <vscale x 4 x i8> @test_vlsseg4_mask_nxv4i8(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
1528 ; CHECK-LABEL: test_vlsseg4_mask_nxv4i8:
1529 ; CHECK: # %bb.0: # %entry
1530 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
1531 ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
1532 ; CHECK-NEXT: vmv1r.v v8, v7
1533 ; CHECK-NEXT: vmv1r.v v9, v7
1534 ; CHECK-NEXT: vmv1r.v v10, v7
1535 ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
1538 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1539 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
1540 %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
1541 %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
1542 ret <vscale x 4 x i8> %3
1545 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i32, i32)
1546 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i32, <vscale x 4 x i1>, i32, i32)
1548 define <vscale x 4 x i8> @test_vlsseg5_nxv4i8(ptr %base, i32 %offset, i32 %vl) {
1549 ; CHECK-LABEL: test_vlsseg5_nxv4i8:
1550 ; CHECK: # %bb.0: # %entry
1551 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
1552 ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
1555 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1556 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
1557 ret <vscale x 4 x i8> %1
1560 define <vscale x 4 x i8> @test_vlsseg5_mask_nxv4i8(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
1561 ; CHECK-LABEL: test_vlsseg5_mask_nxv4i8:
1562 ; CHECK: # %bb.0: # %entry
1563 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
1564 ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
1565 ; CHECK-NEXT: vmv1r.v v8, v7
1566 ; CHECK-NEXT: vmv1r.v v9, v7
1567 ; CHECK-NEXT: vmv1r.v v10, v7
1568 ; CHECK-NEXT: vmv1r.v v11, v7
1569 ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
1572 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1573 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
1574 %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
1575 %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
1576 ret <vscale x 4 x i8> %3
1579 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i32, i32)
1580 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i32, <vscale x 4 x i1>, i32, i32)
1582 define <vscale x 4 x i8> @test_vlsseg6_nxv4i8(ptr %base, i32 %offset, i32 %vl) {
1583 ; CHECK-LABEL: test_vlsseg6_nxv4i8:
1584 ; CHECK: # %bb.0: # %entry
1585 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
1586 ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
1589 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1590 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
1591 ret <vscale x 4 x i8> %1
1594 define <vscale x 4 x i8> @test_vlsseg6_mask_nxv4i8(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
1595 ; CHECK-LABEL: test_vlsseg6_mask_nxv4i8:
1596 ; CHECK: # %bb.0: # %entry
1597 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
1598 ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
1599 ; CHECK-NEXT: vmv1r.v v8, v7
1600 ; CHECK-NEXT: vmv1r.v v9, v7
1601 ; CHECK-NEXT: vmv1r.v v10, v7
1602 ; CHECK-NEXT: vmv1r.v v11, v7
1603 ; CHECK-NEXT: vmv1r.v v12, v7
1604 ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t
1607 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1608 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
1609 %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
1610 %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
1611 ret <vscale x 4 x i8> %3
1614 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i32, i32)
1615 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i32, <vscale x 4 x i1>, i32, i32)
1617 define <vscale x 4 x i8> @test_vlsseg7_nxv4i8(ptr %base, i32 %offset, i32 %vl) {
1618 ; CHECK-LABEL: test_vlsseg7_nxv4i8:
1619 ; CHECK: # %bb.0: # %entry
1620 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
1621 ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
1624 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1625 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
1626 ret <vscale x 4 x i8> %1
1629 define <vscale x 4 x i8> @test_vlsseg7_mask_nxv4i8(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
1630 ; CHECK-LABEL: test_vlsseg7_mask_nxv4i8:
1631 ; CHECK: # %bb.0: # %entry
1632 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
1633 ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
1634 ; CHECK-NEXT: vmv1r.v v8, v7
1635 ; CHECK-NEXT: vmv1r.v v9, v7
1636 ; CHECK-NEXT: vmv1r.v v10, v7
1637 ; CHECK-NEXT: vmv1r.v v11, v7
1638 ; CHECK-NEXT: vmv1r.v v12, v7
1639 ; CHECK-NEXT: vmv1r.v v13, v7
1640 ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
1643 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1644 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
1645 %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
1646 %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
1647 ret <vscale x 4 x i8> %3
1650 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i32, i32)
1651 declare {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, ptr, i32, <vscale x 4 x i1>, i32, i32)
1653 define <vscale x 4 x i8> @test_vlsseg8_nxv4i8(ptr %base, i32 %offset, i32 %vl) {
1654 ; CHECK-LABEL: test_vlsseg8_nxv4i8:
1655 ; CHECK: # %bb.0: # %entry
1656 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
1657 ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
1660 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1661 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 1
1662 ret <vscale x 4 x i8> %1
1665 define <vscale x 4 x i8> @test_vlsseg8_mask_nxv4i8(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
1666 ; CHECK-LABEL: test_vlsseg8_mask_nxv4i8:
1667 ; CHECK: # %bb.0: # %entry
1668 ; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
1669 ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
1670 ; CHECK-NEXT: vmv1r.v v8, v7
1671 ; CHECK-NEXT: vmv1r.v v9, v7
1672 ; CHECK-NEXT: vmv1r.v v10, v7
1673 ; CHECK-NEXT: vmv1r.v v11, v7
1674 ; CHECK-NEXT: vmv1r.v v12, v7
1675 ; CHECK-NEXT: vmv1r.v v13, v7
1676 ; CHECK-NEXT: vmv1r.v v14, v7
1677 ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t
1680 %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1681 %1 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %0, 0
1682 %2 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.mask.nxv4i8(<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1,<vscale x 4 x i8> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
1683 %3 = extractvalue {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} %2, 1
1684 ret <vscale x 4 x i8> %3
1687 declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i32, i32)
1688 declare {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i32, <vscale x 1 x i1>, i32, i32)
1690 define <vscale x 1 x i16> @test_vlsseg2_nxv1i16(ptr %base, i32 %offset, i32 %vl) {
1691 ; CHECK-LABEL: test_vlsseg2_nxv1i16:
1692 ; CHECK: # %bb.0: # %entry
1693 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
1694 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
1697 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
1698 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
1699 ret <vscale x 1 x i16> %1
1702 define <vscale x 1 x i16> @test_vlsseg2_mask_nxv1i16(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
1703 ; CHECK-LABEL: test_vlsseg2_mask_nxv1i16:
1704 ; CHECK: # %bb.0: # %entry
1705 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
1706 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
1707 ; CHECK-NEXT: vmv1r.v v8, v7
1708 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
1711 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
1712 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
1713 %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
1714 %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
1715 ret <vscale x 1 x i16> %3
1718 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i32, i32)
1719 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i32, <vscale x 1 x i1>, i32, i32)
1721 define <vscale x 1 x i16> @test_vlsseg3_nxv1i16(ptr %base, i32 %offset, i32 %vl) {
1722 ; CHECK-LABEL: test_vlsseg3_nxv1i16:
1723 ; CHECK: # %bb.0: # %entry
1724 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
1725 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
1728 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
1729 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
1730 ret <vscale x 1 x i16> %1
; Masked vlsseg3: an unmasked load first produces field 0, which is reused as all three merge operands of the masked load (policy operand i32 1); field 1 of the masked result is returned.
1733 define <vscale x 1 x i16> @test_vlsseg3_mask_nxv1i16(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
1734 ; CHECK-LABEL: test_vlsseg3_mask_nxv1i16:
1735 ; CHECK: # %bb.0: # %entry
1736 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
1737 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
1738 ; CHECK-NEXT: vmv1r.v v8, v7
1739 ; CHECK-NEXT: vmv1r.v v9, v7
1740 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
1743 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
1744 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
1745 %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
1746 %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
1747 ret <vscale x 1 x i16> %3
1750 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i32, i32)
1751 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked strided 4-segment load of nxv1i16 (all merge operands undef); only field 1 is extracted and returned.
1753 define <vscale x 1 x i16> @test_vlsseg4_nxv1i16(ptr %base, i32 %offset, i32 %vl) {
1754 ; CHECK-LABEL: test_vlsseg4_nxv1i16:
1755 ; CHECK: # %bb.0: # %entry
1756 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
1757 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
1760 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
1761 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
1762 ret <vscale x 1 x i16> %1
; Masked vlsseg4: field 0 of an unmasked load is reused as all four merge operands of the masked load (policy operand i32 1); field 1 of the masked result is returned.
1765 define <vscale x 1 x i16> @test_vlsseg4_mask_nxv1i16(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
1766 ; CHECK-LABEL: test_vlsseg4_mask_nxv1i16:
1767 ; CHECK: # %bb.0: # %entry
1768 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
1769 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
1770 ; CHECK-NEXT: vmv1r.v v8, v7
1771 ; CHECK-NEXT: vmv1r.v v9, v7
1772 ; CHECK-NEXT: vmv1r.v v10, v7
1773 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
1776 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
1777 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
1778 %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
1779 %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
1780 ret <vscale x 1 x i16> %3
1783 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i32, i32)
1784 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked strided 5-segment load of nxv1i16 (all merge operands undef); only field 1 is extracted and returned.
1786 define <vscale x 1 x i16> @test_vlsseg5_nxv1i16(ptr %base, i32 %offset, i32 %vl) {
1787 ; CHECK-LABEL: test_vlsseg5_nxv1i16:
1788 ; CHECK: # %bb.0: # %entry
1789 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
1790 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
1793 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
1794 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
1795 ret <vscale x 1 x i16> %1
; Masked vlsseg5: field 0 of an unmasked load is reused as all five merge operands of the masked load (policy operand i32 1); field 1 of the masked result is returned.
1798 define <vscale x 1 x i16> @test_vlsseg5_mask_nxv1i16(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
1799 ; CHECK-LABEL: test_vlsseg5_mask_nxv1i16:
1800 ; CHECK: # %bb.0: # %entry
1801 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
1802 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
1803 ; CHECK-NEXT: vmv1r.v v8, v7
1804 ; CHECK-NEXT: vmv1r.v v9, v7
1805 ; CHECK-NEXT: vmv1r.v v10, v7
1806 ; CHECK-NEXT: vmv1r.v v11, v7
1807 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
1810 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
1811 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
1812 %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
1813 %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
1814 ret <vscale x 1 x i16> %3
1817 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i32, i32)
1818 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked strided 6-segment load of nxv1i16 (all merge operands undef); only field 1 is extracted and returned.
1820 define <vscale x 1 x i16> @test_vlsseg6_nxv1i16(ptr %base, i32 %offset, i32 %vl) {
1821 ; CHECK-LABEL: test_vlsseg6_nxv1i16:
1822 ; CHECK: # %bb.0: # %entry
1823 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
1824 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
1827 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
1828 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
1829 ret <vscale x 1 x i16> %1
; Masked vlsseg6: field 0 of an unmasked load is reused as all six merge operands of the masked load (policy operand i32 1); field 1 of the masked result is returned.
1832 define <vscale x 1 x i16> @test_vlsseg6_mask_nxv1i16(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
1833 ; CHECK-LABEL: test_vlsseg6_mask_nxv1i16:
1834 ; CHECK: # %bb.0: # %entry
1835 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
1836 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
1837 ; CHECK-NEXT: vmv1r.v v8, v7
1838 ; CHECK-NEXT: vmv1r.v v9, v7
1839 ; CHECK-NEXT: vmv1r.v v10, v7
1840 ; CHECK-NEXT: vmv1r.v v11, v7
1841 ; CHECK-NEXT: vmv1r.v v12, v7
1842 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
1845 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
1846 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
1847 %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
1848 %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
1849 ret <vscale x 1 x i16> %3
1852 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i32, i32)
1853 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked strided 7-segment load of nxv1i16 (all merge operands undef); only field 1 is extracted and returned.
1855 define <vscale x 1 x i16> @test_vlsseg7_nxv1i16(ptr %base, i32 %offset, i32 %vl) {
1856 ; CHECK-LABEL: test_vlsseg7_nxv1i16:
1857 ; CHECK: # %bb.0: # %entry
1858 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
1859 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
1862 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
1863 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
1864 ret <vscale x 1 x i16> %1
; Masked vlsseg7: field 0 of an unmasked load is reused as all seven merge operands of the masked load (policy operand i32 1); field 1 of the masked result is returned.
1867 define <vscale x 1 x i16> @test_vlsseg7_mask_nxv1i16(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
1868 ; CHECK-LABEL: test_vlsseg7_mask_nxv1i16:
1869 ; CHECK: # %bb.0: # %entry
1870 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
1871 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
1872 ; CHECK-NEXT: vmv1r.v v8, v7
1873 ; CHECK-NEXT: vmv1r.v v9, v7
1874 ; CHECK-NEXT: vmv1r.v v10, v7
1875 ; CHECK-NEXT: vmv1r.v v11, v7
1876 ; CHECK-NEXT: vmv1r.v v12, v7
1877 ; CHECK-NEXT: vmv1r.v v13, v7
1878 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
1881 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
1882 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
1883 %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
1884 %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
1885 ret <vscale x 1 x i16> %3
1888 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i32, i32)
1889 declare {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked strided 8-segment load of nxv1i16 (all merge operands undef); only field 1 is extracted and returned.
1891 define <vscale x 1 x i16> @test_vlsseg8_nxv1i16(ptr %base, i32 %offset, i32 %vl) {
1892 ; CHECK-LABEL: test_vlsseg8_nxv1i16:
1893 ; CHECK: # %bb.0: # %entry
1894 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
1895 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
1898 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
1899 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 1
1900 ret <vscale x 1 x i16> %1
; Masked vlsseg8: field 0 of an unmasked load is reused as all eight merge operands of the masked load (policy operand i32 1); field 1 of the masked result is returned.
1903 define <vscale x 1 x i16> @test_vlsseg8_mask_nxv1i16(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
1904 ; CHECK-LABEL: test_vlsseg8_mask_nxv1i16:
1905 ; CHECK: # %bb.0: # %entry
1906 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
1907 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
1908 ; CHECK-NEXT: vmv1r.v v8, v7
1909 ; CHECK-NEXT: vmv1r.v v9, v7
1910 ; CHECK-NEXT: vmv1r.v v10, v7
1911 ; CHECK-NEXT: vmv1r.v v11, v7
1912 ; CHECK-NEXT: vmv1r.v v12, v7
1913 ; CHECK-NEXT: vmv1r.v v13, v7
1914 ; CHECK-NEXT: vmv1r.v v14, v7
1915 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
1918 %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
1919 %1 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %0, 0
1920 %2 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.mask.nxv1i16(<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1,<vscale x 1 x i16> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
1921 %3 = extractvalue {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} %2, 1
1922 ret <vscale x 1 x i16> %3
1925 declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, ptr, i32, i32)
1926 declare {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i8>, ptr, i32, <vscale x 32 x i1>, i32, i32)
; Unmasked strided 2-segment load of nxv32i8 (merge operands undef); only field 1 is extracted and returned.
1928 define <vscale x 32 x i8> @test_vlsseg2_nxv32i8(ptr %base, i32 %offset, i32 %vl) {
1929 ; CHECK-LABEL: test_vlsseg2_nxv32i8:
1930 ; CHECK: # %bb.0: # %entry
1931 ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, ma
1932 ; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1
1935 %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1936 %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 1
1937 ret <vscale x 32 x i8> %1
; Masked vlsseg2 at LMUL=4 (note vmv4r in the checks): field 0 of an unmasked load is reused as both merge operands of the masked load (policy operand i32 1); field 1 of the masked result is returned.
1940 define <vscale x 32 x i8> @test_vlsseg2_mask_nxv32i8(ptr %base, i32 %offset, i32 %vl, <vscale x 32 x i1> %mask) {
1941 ; CHECK-LABEL: test_vlsseg2_mask_nxv32i8:
1942 ; CHECK: # %bb.0: # %entry
1943 ; CHECK-NEXT: vsetvli zero, a2, e8, m4, ta, mu
1944 ; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1
1945 ; CHECK-NEXT: vmv4r.v v8, v4
1946 ; CHECK-NEXT: vlsseg2e8.v v4, (a0), a1, v0.t
1949 %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1950 %1 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %0, 0
1951 %2 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.mask.nxv32i8(<vscale x 32 x i8> %1,<vscale x 32 x i8> %1, ptr %base, i32 %offset, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
1952 %3 = extractvalue {<vscale x 32 x i8>,<vscale x 32 x i8>} %2, 1
1953 ret <vscale x 32 x i8> %3
1956 declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i32, i32)
1957 declare {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked strided 2-segment load of nxv2i8 (merge operands undef); only field 1 is extracted and returned.
1959 define <vscale x 2 x i8> @test_vlsseg2_nxv2i8(ptr %base, i32 %offset, i32 %vl) {
1960 ; CHECK-LABEL: test_vlsseg2_nxv2i8:
1961 ; CHECK: # %bb.0: # %entry
1962 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
1963 ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
1966 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1967 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
1968 ret <vscale x 2 x i8> %1
; Masked vlsseg2: field 0 of an unmasked load is reused as both merge operands of the masked load (policy operand i32 1); field 1 of the masked result is returned.
1971 define <vscale x 2 x i8> @test_vlsseg2_mask_nxv2i8(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
1972 ; CHECK-LABEL: test_vlsseg2_mask_nxv2i8:
1973 ; CHECK: # %bb.0: # %entry
1974 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
1975 ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1
1976 ; CHECK-NEXT: vmv1r.v v8, v7
1977 ; CHECK-NEXT: vlsseg2e8.v v7, (a0), a1, v0.t
1980 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1981 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
1982 %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
1983 %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
1984 ret <vscale x 2 x i8> %3
1987 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i32, i32)
1988 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked strided 3-segment load of nxv2i8 (all merge operands undef); only field 1 is extracted and returned.
1990 define <vscale x 2 x i8> @test_vlsseg3_nxv2i8(ptr %base, i32 %offset, i32 %vl) {
1991 ; CHECK-LABEL: test_vlsseg3_nxv2i8:
1992 ; CHECK: # %bb.0: # %entry
1993 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
1994 ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
1997 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
1998 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
1999 ret <vscale x 2 x i8> %1
; Masked vlsseg3: field 0 of an unmasked load is reused as all three merge operands of the masked load (policy operand i32 1); field 1 of the masked result is returned.
2002 define <vscale x 2 x i8> @test_vlsseg3_mask_nxv2i8(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
2003 ; CHECK-LABEL: test_vlsseg3_mask_nxv2i8:
2004 ; CHECK: # %bb.0: # %entry
2005 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
2006 ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1
2007 ; CHECK-NEXT: vmv1r.v v8, v7
2008 ; CHECK-NEXT: vmv1r.v v9, v7
2009 ; CHECK-NEXT: vlsseg3e8.v v7, (a0), a1, v0.t
2012 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
2013 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
2014 %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2015 %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
2016 ret <vscale x 2 x i8> %3
2019 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i32, i32)
2020 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked strided 4-segment load of nxv2i8 (all merge operands undef); only field 1 is extracted and returned.
2022 define <vscale x 2 x i8> @test_vlsseg4_nxv2i8(ptr %base, i32 %offset, i32 %vl) {
2023 ; CHECK-LABEL: test_vlsseg4_nxv2i8:
2024 ; CHECK: # %bb.0: # %entry
2025 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
2026 ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
2029 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
2030 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
2031 ret <vscale x 2 x i8> %1
; Masked vlsseg4: field 0 of an unmasked load is reused as all four merge operands of the masked load (policy operand i32 1); field 1 of the masked result is returned.
2034 define <vscale x 2 x i8> @test_vlsseg4_mask_nxv2i8(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
2035 ; CHECK-LABEL: test_vlsseg4_mask_nxv2i8:
2036 ; CHECK: # %bb.0: # %entry
2037 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
2038 ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1
2039 ; CHECK-NEXT: vmv1r.v v8, v7
2040 ; CHECK-NEXT: vmv1r.v v9, v7
2041 ; CHECK-NEXT: vmv1r.v v10, v7
2042 ; CHECK-NEXT: vlsseg4e8.v v7, (a0), a1, v0.t
2045 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
2046 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
2047 %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2048 %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
2049 ret <vscale x 2 x i8> %3
2052 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i32, i32)
2053 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked strided 5-segment load of nxv2i8 (all merge operands undef); only field 1 is extracted and returned.
2055 define <vscale x 2 x i8> @test_vlsseg5_nxv2i8(ptr %base, i32 %offset, i32 %vl) {
2056 ; CHECK-LABEL: test_vlsseg5_nxv2i8:
2057 ; CHECK: # %bb.0: # %entry
2058 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
2059 ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
2062 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
2063 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
2064 ret <vscale x 2 x i8> %1
; Masked vlsseg5: field 0 of an unmasked load is reused as all five merge operands of the masked load (policy operand i32 1); field 1 of the masked result is returned.
2067 define <vscale x 2 x i8> @test_vlsseg5_mask_nxv2i8(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
2068 ; CHECK-LABEL: test_vlsseg5_mask_nxv2i8:
2069 ; CHECK: # %bb.0: # %entry
2070 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
2071 ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1
2072 ; CHECK-NEXT: vmv1r.v v8, v7
2073 ; CHECK-NEXT: vmv1r.v v9, v7
2074 ; CHECK-NEXT: vmv1r.v v10, v7
2075 ; CHECK-NEXT: vmv1r.v v11, v7
2076 ; CHECK-NEXT: vlsseg5e8.v v7, (a0), a1, v0.t
2079 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
2080 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
2081 %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2082 %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
2083 ret <vscale x 2 x i8> %3
2086 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i32, i32)
2087 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked strided 6-segment load of nxv2i8 (all merge operands undef); only field 1 is extracted and returned.
2089 define <vscale x 2 x i8> @test_vlsseg6_nxv2i8(ptr %base, i32 %offset, i32 %vl) {
2090 ; CHECK-LABEL: test_vlsseg6_nxv2i8:
2091 ; CHECK: # %bb.0: # %entry
2092 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
2093 ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
2096 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
2097 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
2098 ret <vscale x 2 x i8> %1
; Masked vlsseg6: field 0 of an unmasked load is reused as all six merge operands of the masked load (policy operand i32 1); field 1 of the masked result is returned.
2101 define <vscale x 2 x i8> @test_vlsseg6_mask_nxv2i8(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
2102 ; CHECK-LABEL: test_vlsseg6_mask_nxv2i8:
2103 ; CHECK: # %bb.0: # %entry
2104 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
2105 ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1
2106 ; CHECK-NEXT: vmv1r.v v8, v7
2107 ; CHECK-NEXT: vmv1r.v v9, v7
2108 ; CHECK-NEXT: vmv1r.v v10, v7
2109 ; CHECK-NEXT: vmv1r.v v11, v7
2110 ; CHECK-NEXT: vmv1r.v v12, v7
2111 ; CHECK-NEXT: vlsseg6e8.v v7, (a0), a1, v0.t
2114 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
2115 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
2116 %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2117 %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
2118 ret <vscale x 2 x i8> %3
2121 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i32, i32)
2122 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked strided 7-segment load of nxv2i8 (all merge operands undef); only field 1 is extracted and returned.
2124 define <vscale x 2 x i8> @test_vlsseg7_nxv2i8(ptr %base, i32 %offset, i32 %vl) {
2125 ; CHECK-LABEL: test_vlsseg7_nxv2i8:
2126 ; CHECK: # %bb.0: # %entry
2127 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
2128 ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
2131 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
2132 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
2133 ret <vscale x 2 x i8> %1
; Masked vlsseg7: field 0 of an unmasked load is reused as all seven merge operands of the masked load (policy operand i32 1); field 1 of the masked result is returned.
2136 define <vscale x 2 x i8> @test_vlsseg7_mask_nxv2i8(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
2137 ; CHECK-LABEL: test_vlsseg7_mask_nxv2i8:
2138 ; CHECK: # %bb.0: # %entry
2139 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
2140 ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1
2141 ; CHECK-NEXT: vmv1r.v v8, v7
2142 ; CHECK-NEXT: vmv1r.v v9, v7
2143 ; CHECK-NEXT: vmv1r.v v10, v7
2144 ; CHECK-NEXT: vmv1r.v v11, v7
2145 ; CHECK-NEXT: vmv1r.v v12, v7
2146 ; CHECK-NEXT: vmv1r.v v13, v7
2147 ; CHECK-NEXT: vlsseg7e8.v v7, (a0), a1, v0.t
2150 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
2151 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
2152 %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2153 %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
2154 ret <vscale x 2 x i8> %3
2157 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i32, i32)
2158 declare {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked 8-field strided segment load of nxv2i8: all passthrus undef; returns field 1 of the tuple.
2160 define <vscale x 2 x i8> @test_vlsseg8_nxv2i8(ptr %base, i32 %offset, i32 %vl) {
2161 ; CHECK-LABEL: test_vlsseg8_nxv2i8:
2162 ; CHECK: # %bb.0: # %entry
2163 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
2164 ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
2167 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
2168 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 1
2169 ret <vscale x 2 x i8> %1
; Masked vlsseg8 of nxv2i8: field 0 of an unmasked load seeds every passthru of the
; masked load; returns field 1. Trailing i32 1 is presumably the policy operand — confirm.
2172 define <vscale x 2 x i8> @test_vlsseg8_mask_nxv2i8(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
2173 ; CHECK-LABEL: test_vlsseg8_mask_nxv2i8:
2174 ; CHECK: # %bb.0: # %entry
2175 ; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
2176 ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1
2177 ; CHECK-NEXT: vmv1r.v v8, v7
2178 ; CHECK-NEXT: vmv1r.v v9, v7
2179 ; CHECK-NEXT: vmv1r.v v10, v7
2180 ; CHECK-NEXT: vmv1r.v v11, v7
2181 ; CHECK-NEXT: vmv1r.v v12, v7
2182 ; CHECK-NEXT: vmv1r.v v13, v7
2183 ; CHECK-NEXT: vmv1r.v v14, v7
2184 ; CHECK-NEXT: vlsseg8e8.v v7, (a0), a1, v0.t
2187 %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
2188 %1 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %0, 0
2189 %2 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.mask.nxv2i8(<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1,<vscale x 2 x i8> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2190 %3 = extractvalue {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} %2, 1
2191 ret <vscale x 2 x i8> %3
2194 declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i32, i32)
2195 declare {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked 2-field strided segment load of nxv2i16: undef passthrus; returns field 1.
2197 define <vscale x 2 x i16> @test_vlsseg2_nxv2i16(ptr %base, i32 %offset, i32 %vl) {
2198 ; CHECK-LABEL: test_vlsseg2_nxv2i16:
2199 ; CHECK: # %bb.0: # %entry
2200 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
2201 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
2204 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
2205 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
2206 ret <vscale x 2 x i16> %1
; Masked vlsseg2 of nxv2i16: field 0 of an unmasked load seeds both passthrus of the
; masked load; returns field 1. Trailing i32 1 is presumably the policy operand — confirm.
2209 define <vscale x 2 x i16> @test_vlsseg2_mask_nxv2i16(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
2210 ; CHECK-LABEL: test_vlsseg2_mask_nxv2i16:
2211 ; CHECK: # %bb.0: # %entry
2212 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
2213 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
2214 ; CHECK-NEXT: vmv1r.v v8, v7
2215 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
2218 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
2219 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
2220 %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2221 %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
2222 ret <vscale x 2 x i16> %3
2225 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i32, i32)
2226 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked 3-field strided segment load of nxv2i16: undef passthrus; returns field 1.
2228 define <vscale x 2 x i16> @test_vlsseg3_nxv2i16(ptr %base, i32 %offset, i32 %vl) {
2229 ; CHECK-LABEL: test_vlsseg3_nxv2i16:
2230 ; CHECK: # %bb.0: # %entry
2231 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
2232 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
2235 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
2236 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
2237 ret <vscale x 2 x i16> %1
; Masked vlsseg3 of nxv2i16: field 0 of an unmasked load seeds all passthrus of the
; masked load; returns field 1. Trailing i32 1 is presumably the policy operand — confirm.
2240 define <vscale x 2 x i16> @test_vlsseg3_mask_nxv2i16(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
2241 ; CHECK-LABEL: test_vlsseg3_mask_nxv2i16:
2242 ; CHECK: # %bb.0: # %entry
2243 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
2244 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
2245 ; CHECK-NEXT: vmv1r.v v8, v7
2246 ; CHECK-NEXT: vmv1r.v v9, v7
2247 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
2250 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
2251 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
2252 %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2253 %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
2254 ret <vscale x 2 x i16> %3
2257 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i32, i32)
2258 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked 4-field strided segment load of nxv2i16: undef passthrus; returns field 1.
2260 define <vscale x 2 x i16> @test_vlsseg4_nxv2i16(ptr %base, i32 %offset, i32 %vl) {
2261 ; CHECK-LABEL: test_vlsseg4_nxv2i16:
2262 ; CHECK: # %bb.0: # %entry
2263 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
2264 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
2267 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
2268 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
2269 ret <vscale x 2 x i16> %1
; Masked vlsseg4 of nxv2i16: field 0 of an unmasked load seeds all passthrus of the
; masked load; returns field 1. Trailing i32 1 is presumably the policy operand — confirm.
2272 define <vscale x 2 x i16> @test_vlsseg4_mask_nxv2i16(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
2273 ; CHECK-LABEL: test_vlsseg4_mask_nxv2i16:
2274 ; CHECK: # %bb.0: # %entry
2275 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
2276 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
2277 ; CHECK-NEXT: vmv1r.v v8, v7
2278 ; CHECK-NEXT: vmv1r.v v9, v7
2279 ; CHECK-NEXT: vmv1r.v v10, v7
2280 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
2283 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
2284 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
2285 %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2286 %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
2287 ret <vscale x 2 x i16> %3
2290 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i32, i32)
2291 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked 5-field strided segment load of nxv2i16: undef passthrus; returns field 1.
2293 define <vscale x 2 x i16> @test_vlsseg5_nxv2i16(ptr %base, i32 %offset, i32 %vl) {
2294 ; CHECK-LABEL: test_vlsseg5_nxv2i16:
2295 ; CHECK: # %bb.0: # %entry
2296 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
2297 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
2300 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
2301 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
2302 ret <vscale x 2 x i16> %1
; Masked vlsseg5 of nxv2i16: field 0 of an unmasked load seeds all passthrus of the
; masked load; returns field 1. Trailing i32 1 is presumably the policy operand — confirm.
2305 define <vscale x 2 x i16> @test_vlsseg5_mask_nxv2i16(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
2306 ; CHECK-LABEL: test_vlsseg5_mask_nxv2i16:
2307 ; CHECK: # %bb.0: # %entry
2308 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
2309 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
2310 ; CHECK-NEXT: vmv1r.v v8, v7
2311 ; CHECK-NEXT: vmv1r.v v9, v7
2312 ; CHECK-NEXT: vmv1r.v v10, v7
2313 ; CHECK-NEXT: vmv1r.v v11, v7
2314 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
2317 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
2318 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
2319 %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2320 %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
2321 ret <vscale x 2 x i16> %3
2324 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i32, i32)
2325 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked 6-field strided segment load of nxv2i16: undef passthrus; returns field 1.
2327 define <vscale x 2 x i16> @test_vlsseg6_nxv2i16(ptr %base, i32 %offset, i32 %vl) {
2328 ; CHECK-LABEL: test_vlsseg6_nxv2i16:
2329 ; CHECK: # %bb.0: # %entry
2330 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
2331 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
2334 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
2335 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
2336 ret <vscale x 2 x i16> %1
; Masked vlsseg6 of nxv2i16: field 0 of an unmasked load seeds all passthrus of the
; masked load; returns field 1. Trailing i32 1 is presumably the policy operand — confirm.
2339 define <vscale x 2 x i16> @test_vlsseg6_mask_nxv2i16(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
2340 ; CHECK-LABEL: test_vlsseg6_mask_nxv2i16:
2341 ; CHECK: # %bb.0: # %entry
2342 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
2343 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
2344 ; CHECK-NEXT: vmv1r.v v8, v7
2345 ; CHECK-NEXT: vmv1r.v v9, v7
2346 ; CHECK-NEXT: vmv1r.v v10, v7
2347 ; CHECK-NEXT: vmv1r.v v11, v7
2348 ; CHECK-NEXT: vmv1r.v v12, v7
2349 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
2352 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
2353 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
2354 %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2355 %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
2356 ret <vscale x 2 x i16> %3
2359 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i32, i32)
2360 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked 7-field strided segment load of nxv2i16: undef passthrus; returns field 1.
2362 define <vscale x 2 x i16> @test_vlsseg7_nxv2i16(ptr %base, i32 %offset, i32 %vl) {
2363 ; CHECK-LABEL: test_vlsseg7_nxv2i16:
2364 ; CHECK: # %bb.0: # %entry
2365 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
2366 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
2369 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
2370 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
2371 ret <vscale x 2 x i16> %1
; Masked vlsseg7 of nxv2i16: field 0 of an unmasked load seeds all passthrus of the
; masked load; returns field 1. Trailing i32 1 is presumably the policy operand — confirm.
2374 define <vscale x 2 x i16> @test_vlsseg7_mask_nxv2i16(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
2375 ; CHECK-LABEL: test_vlsseg7_mask_nxv2i16:
2376 ; CHECK: # %bb.0: # %entry
2377 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
2378 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
2379 ; CHECK-NEXT: vmv1r.v v8, v7
2380 ; CHECK-NEXT: vmv1r.v v9, v7
2381 ; CHECK-NEXT: vmv1r.v v10, v7
2382 ; CHECK-NEXT: vmv1r.v v11, v7
2383 ; CHECK-NEXT: vmv1r.v v12, v7
2384 ; CHECK-NEXT: vmv1r.v v13, v7
2385 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
2388 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
2389 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
2390 %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2391 %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
2392 ret <vscale x 2 x i16> %3
2395 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i32, i32)
2396 declare {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked 8-field strided segment load of nxv2i16: undef passthrus; returns field 1.
2398 define <vscale x 2 x i16> @test_vlsseg8_nxv2i16(ptr %base, i32 %offset, i32 %vl) {
2399 ; CHECK-LABEL: test_vlsseg8_nxv2i16:
2400 ; CHECK: # %bb.0: # %entry
2401 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
2402 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
2405 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
2406 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 1
2407 ret <vscale x 2 x i16> %1
; Masked vlsseg8 of nxv2i16: field 0 of an unmasked load seeds all passthrus of the
; masked load; returns field 1. Trailing i32 1 is presumably the policy operand — confirm.
2410 define <vscale x 2 x i16> @test_vlsseg8_mask_nxv2i16(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
2411 ; CHECK-LABEL: test_vlsseg8_mask_nxv2i16:
2412 ; CHECK: # %bb.0: # %entry
2413 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
2414 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
2415 ; CHECK-NEXT: vmv1r.v v8, v7
2416 ; CHECK-NEXT: vmv1r.v v9, v7
2417 ; CHECK-NEXT: vmv1r.v v10, v7
2418 ; CHECK-NEXT: vmv1r.v v11, v7
2419 ; CHECK-NEXT: vmv1r.v v12, v7
2420 ; CHECK-NEXT: vmv1r.v v13, v7
2421 ; CHECK-NEXT: vmv1r.v v14, v7
2422 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
2425 %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
2426 %1 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %0, 0
2427 %2 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.mask.nxv2i16(<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1,<vscale x 2 x i16> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2428 %3 = extractvalue {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} %2, 1
2429 ret <vscale x 2 x i16> %3
2432 declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, i32, i32)
2433 declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, i32, <vscale x 4 x i1>, i32, i32)
; Unmasked 2-field strided segment load of nxv4i32: undef passthrus; returns field 1.
2435 define <vscale x 4 x i32> @test_vlsseg2_nxv4i32(ptr %base, i32 %offset, i32 %vl) {
2436 ; CHECK-LABEL: test_vlsseg2_nxv4i32:
2437 ; CHECK: # %bb.0: # %entry
2438 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
2439 ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1
2442 %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %offset, i32 %vl)
2443 %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
2444 ret <vscale x 4 x i32> %1
; Masked vlsseg2 of nxv4i32: field 0 of an unmasked load seeds both passthrus of the
; masked load; returns field 1. Trailing i32 1 is presumably the policy operand — confirm.
2447 define <vscale x 4 x i32> @test_vlsseg2_mask_nxv4i32(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
2448 ; CHECK-LABEL: test_vlsseg2_mask_nxv4i32:
2449 ; CHECK: # %bb.0: # %entry
2450 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
2451 ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1
2452 ; CHECK-NEXT: vmv2r.v v8, v6
2453 ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t
2456 %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %offset, i32 %vl)
2457 %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
2458 %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
2459 %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
2460 ret <vscale x 4 x i32> %3
2463 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, i32, i32)
2464 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, i32, <vscale x 4 x i1>, i32, i32)
; Unmasked 3-field strided segment load of nxv4i32: undef passthrus; returns field 1.
2466 define <vscale x 4 x i32> @test_vlsseg3_nxv4i32(ptr %base, i32 %offset, i32 %vl) {
2467 ; CHECK-LABEL: test_vlsseg3_nxv4i32:
2468 ; CHECK: # %bb.0: # %entry
2469 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
2470 ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1
2473 %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %offset, i32 %vl)
2474 %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
2475 ret <vscale x 4 x i32> %1
; Masked vlsseg3 of nxv4i32: field 0 of an unmasked load seeds all passthrus of the
; masked load; returns field 1. Trailing i32 1 is presumably the policy operand — confirm.
2478 define <vscale x 4 x i32> @test_vlsseg3_mask_nxv4i32(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
2479 ; CHECK-LABEL: test_vlsseg3_mask_nxv4i32:
2480 ; CHECK: # %bb.0: # %entry
2481 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
2482 ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1
2483 ; CHECK-NEXT: vmv2r.v v8, v6
2484 ; CHECK-NEXT: vmv2r.v v10, v6
2485 ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t
2488 %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %offset, i32 %vl)
2489 %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
2490 %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
2491 %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
2492 ret <vscale x 4 x i32> %3
2495 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, i32, i32)
2496 declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr, i32, <vscale x 4 x i1>, i32, i32)
; Unmasked 4-field strided segment load of nxv4i32: undef passthrus; returns field 1.
2498 define <vscale x 4 x i32> @test_vlsseg4_nxv4i32(ptr %base, i32 %offset, i32 %vl) {
2499 ; CHECK-LABEL: test_vlsseg4_nxv4i32:
2500 ; CHECK: # %bb.0: # %entry
2501 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
2502 ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1
2505 %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %offset, i32 %vl)
2506 %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
2507 ret <vscale x 4 x i32> %1
; Masked vlsseg4 of nxv4i32: field 0 of an unmasked load seeds all passthrus of the
; masked load; returns field 1. Trailing i32 1 is presumably the policy operand — confirm.
2510 define <vscale x 4 x i32> @test_vlsseg4_mask_nxv4i32(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
2511 ; CHECK-LABEL: test_vlsseg4_mask_nxv4i32:
2512 ; CHECK: # %bb.0: # %entry
2513 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
2514 ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1
2515 ; CHECK-NEXT: vmv2r.v v8, v6
2516 ; CHECK-NEXT: vmv2r.v v10, v6
2517 ; CHECK-NEXT: vmv2r.v v12, v6
2518 ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t
2521 %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %offset, i32 %vl)
2522 %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 0
2523 %2 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.mask.nxv4i32(<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1,<vscale x 4 x i32> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
2524 %3 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %2, 1
2525 ret <vscale x 4 x i32> %3
2528 declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, ptr, i32, i32)
2529 declare {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.mask.nxv16f16(<vscale x 16 x half>,<vscale x 16 x half>, ptr, i32, <vscale x 16 x i1>, i32, i32)
; Unmasked 2-field strided segment load of nxv16f16: undef passthrus; returns field 1.
2531 define <vscale x 16 x half> @test_vlsseg2_nxv16f16(ptr %base, i32 %offset, i32 %vl) {
2532 ; CHECK-LABEL: test_vlsseg2_nxv16f16:
2533 ; CHECK: # %bb.0: # %entry
2534 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
2535 ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
2538 %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i32 %offset, i32 %vl)
2539 %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 1
2540 ret <vscale x 16 x half> %1
; Masked vlsseg2 of nxv16f16: field 0 of an unmasked load seeds both passthrus of the
; masked load; returns field 1. Trailing i32 1 is presumably the policy operand — confirm.
2543 define <vscale x 16 x half> @test_vlsseg2_mask_nxv16f16(ptr %base, i32 %offset, i32 %vl, <vscale x 16 x i1> %mask) {
2544 ; CHECK-LABEL: test_vlsseg2_mask_nxv16f16:
2545 ; CHECK: # %bb.0: # %entry
2546 ; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
2547 ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
2548 ; CHECK-NEXT: vmv4r.v v8, v4
2549 ; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t
2552 %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i32 %offset, i32 %vl)
2553 %1 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %0, 0
2554 %2 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.mask.nxv16f16(<vscale x 16 x half> %1,<vscale x 16 x half> %1, ptr %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
2555 %3 = extractvalue {<vscale x 16 x half>,<vscale x 16 x half>} %2, 1
2556 ret <vscale x 16 x half> %3
2559 declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, ptr, i32, i32)
2560 declare {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.mask.nxv4f64(<vscale x 4 x double>,<vscale x 4 x double>, ptr, i32, <vscale x 4 x i1>, i32, i32)
; Unmasked 2-field strided segment load of nxv4f64: undef passthrus; returns field 1.
2562 define <vscale x 4 x double> @test_vlsseg2_nxv4f64(ptr %base, i32 %offset, i32 %vl) {
2563 ; CHECK-LABEL: test_vlsseg2_nxv4f64:
2564 ; CHECK: # %bb.0: # %entry
2565 ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
2566 ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1
2569 %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i32 %offset, i32 %vl)
2570 %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 1
2571 ret <vscale x 4 x double> %1
; Masked vlsseg2 of nxv4f64: field 0 of an unmasked load seeds both passthrus of the
; masked load; returns field 1. Trailing i32 1 is presumably the policy operand — confirm.
2574 define <vscale x 4 x double> @test_vlsseg2_mask_nxv4f64(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
2575 ; CHECK-LABEL: test_vlsseg2_mask_nxv4f64:
2576 ; CHECK: # %bb.0: # %entry
2577 ; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, mu
2578 ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1
2579 ; CHECK-NEXT: vmv4r.v v8, v4
2580 ; CHECK-NEXT: vlsseg2e64.v v4, (a0), a1, v0.t
2583 %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i32 %offset, i32 %vl)
2584 %1 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %0, 0
2585 %2 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.mask.nxv4f64(<vscale x 4 x double> %1,<vscale x 4 x double> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
2586 %3 = extractvalue {<vscale x 4 x double>,<vscale x 4 x double>} %2, 1
2587 ret <vscale x 4 x double> %3
2590 declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>, ptr, i32, i32)
2591 declare {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked 2-field strided segment load of nxv1f64: undef passthrus; returns field 1.
2593 define <vscale x 1 x double> @test_vlsseg2_nxv1f64(ptr %base, i32 %offset, i32 %vl) {
2594 ; CHECK-LABEL: test_vlsseg2_nxv1f64:
2595 ; CHECK: # %bb.0: # %entry
2596 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
2597 ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1
2600 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
2601 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
2602 ret <vscale x 1 x double> %1
; Masked 2-field strided segment load, nxv1f64: field 0 of an unmasked load is used
; as the merge value for both fields of the masked reload (vmv1r.v v8, v7), which
; executes under v0.t with mask-undisturbed (mu). Trailing i32 1 is the policy operand.
2605 define <vscale x 1 x double> @test_vlsseg2_mask_nxv1f64(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
2606 ; CHECK-LABEL: test_vlsseg2_mask_nxv1f64:
2607 ; CHECK: # %bb.0: # %entry
2608 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
2609 ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1
2610 ; CHECK-NEXT: vmv1r.v v8, v7
2611 ; CHECK-NEXT: vlsseg2e64.v v7, (a0), a1, v0.t
2614 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
2615 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
2616 %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2617 %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
2618 ret <vscale x 1 x double> %3
2621 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i32, i32)
2622 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked 3-field strided segment load, nxv1f64: undef merge operands; field 1 is
; returned, so the group loads at v7 and field 1 lands in v8.
2624 define <vscale x 1 x double> @test_vlsseg3_nxv1f64(ptr %base, i32 %offset, i32 %vl) {
2625 ; CHECK-LABEL: test_vlsseg3_nxv1f64:
2626 ; CHECK: # %bb.0: # %entry
2627 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
2628 ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1
2631 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
2632 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
2633 ret <vscale x 1 x double> %1
; Masked 3-field variant: the unmasked load's field 0 seeds all three merge operands
; (copied into v8 and v9) before the masked reload at v7 with v0.t. Trailing i32 1 is
; the policy operand.
2636 define <vscale x 1 x double> @test_vlsseg3_mask_nxv1f64(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
2637 ; CHECK-LABEL: test_vlsseg3_mask_nxv1f64:
2638 ; CHECK: # %bb.0: # %entry
2639 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
2640 ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1
2641 ; CHECK-NEXT: vmv1r.v v8, v7
2642 ; CHECK-NEXT: vmv1r.v v9, v7
2643 ; CHECK-NEXT: vlsseg3e64.v v7, (a0), a1, v0.t
2646 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
2647 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
2648 %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2649 %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
2650 ret <vscale x 1 x double> %3
2653 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i32, i32)
2654 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked 4-field strided segment load, nxv1f64: undef merge operands; field 1 is
; returned, so the group loads at v7 and field 1 lands in v8.
2656 define <vscale x 1 x double> @test_vlsseg4_nxv1f64(ptr %base, i32 %offset, i32 %vl) {
2657 ; CHECK-LABEL: test_vlsseg4_nxv1f64:
2658 ; CHECK: # %bb.0: # %entry
2659 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
2660 ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1
2663 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
2664 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
2665 ret <vscale x 1 x double> %1
; Masked 4-field variant: the unmasked load's field 0 seeds all four merge operands
; (copied into v8-v10) before the masked reload at v7 with v0.t. Trailing i32 1 is the
; policy operand.
2668 define <vscale x 1 x double> @test_vlsseg4_mask_nxv1f64(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
2669 ; CHECK-LABEL: test_vlsseg4_mask_nxv1f64:
2670 ; CHECK: # %bb.0: # %entry
2671 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
2672 ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1
2673 ; CHECK-NEXT: vmv1r.v v8, v7
2674 ; CHECK-NEXT: vmv1r.v v9, v7
2675 ; CHECK-NEXT: vmv1r.v v10, v7
2676 ; CHECK-NEXT: vlsseg4e64.v v7, (a0), a1, v0.t
2679 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
2680 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
2681 %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2682 %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
2683 ret <vscale x 1 x double> %3
2686 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i32, i32)
2687 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked 5-field strided segment load, nxv1f64: undef merge operands; field 1 is
; returned, so the group loads at v7 and field 1 lands in v8.
2689 define <vscale x 1 x double> @test_vlsseg5_nxv1f64(ptr %base, i32 %offset, i32 %vl) {
2690 ; CHECK-LABEL: test_vlsseg5_nxv1f64:
2691 ; CHECK: # %bb.0: # %entry
2692 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
2693 ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1
2696 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
2697 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
2698 ret <vscale x 1 x double> %1
; Masked 5-field variant: the unmasked load's field 0 seeds all five merge operands
; (copied into v8-v11) before the masked reload at v7 with v0.t. Trailing i32 1 is the
; policy operand.
2701 define <vscale x 1 x double> @test_vlsseg5_mask_nxv1f64(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
2702 ; CHECK-LABEL: test_vlsseg5_mask_nxv1f64:
2703 ; CHECK: # %bb.0: # %entry
2704 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
2705 ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1
2706 ; CHECK-NEXT: vmv1r.v v8, v7
2707 ; CHECK-NEXT: vmv1r.v v9, v7
2708 ; CHECK-NEXT: vmv1r.v v10, v7
2709 ; CHECK-NEXT: vmv1r.v v11, v7
2710 ; CHECK-NEXT: vlsseg5e64.v v7, (a0), a1, v0.t
2713 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
2714 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
2715 %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2716 %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
2717 ret <vscale x 1 x double> %3
2720 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i32, i32)
2721 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked 6-field strided segment load, nxv1f64: undef merge operands; field 1 is
; returned, so the group loads at v7 and field 1 lands in v8.
2723 define <vscale x 1 x double> @test_vlsseg6_nxv1f64(ptr %base, i32 %offset, i32 %vl) {
2724 ; CHECK-LABEL: test_vlsseg6_nxv1f64:
2725 ; CHECK: # %bb.0: # %entry
2726 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
2727 ; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1
2730 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
2731 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
2732 ret <vscale x 1 x double> %1
; Masked 6-field variant: the unmasked load's field 0 seeds all six merge operands
; (copied into v8-v12) before the masked reload at v7 with v0.t. Trailing i32 1 is the
; policy operand.
2735 define <vscale x 1 x double> @test_vlsseg6_mask_nxv1f64(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
2736 ; CHECK-LABEL: test_vlsseg6_mask_nxv1f64:
2737 ; CHECK: # %bb.0: # %entry
2738 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
2739 ; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1
2740 ; CHECK-NEXT: vmv1r.v v8, v7
2741 ; CHECK-NEXT: vmv1r.v v9, v7
2742 ; CHECK-NEXT: vmv1r.v v10, v7
2743 ; CHECK-NEXT: vmv1r.v v11, v7
2744 ; CHECK-NEXT: vmv1r.v v12, v7
2745 ; CHECK-NEXT: vlsseg6e64.v v7, (a0), a1, v0.t
2748 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
2749 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
2750 %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2751 %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
2752 ret <vscale x 1 x double> %3
2755 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i32, i32)
2756 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked 7-field strided segment load, nxv1f64: undef merge operands; field 1 is
; returned, so the group loads at v7 and field 1 lands in v8.
2758 define <vscale x 1 x double> @test_vlsseg7_nxv1f64(ptr %base, i32 %offset, i32 %vl) {
2759 ; CHECK-LABEL: test_vlsseg7_nxv1f64:
2760 ; CHECK: # %bb.0: # %entry
2761 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
2762 ; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1
2765 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
2766 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
2767 ret <vscale x 1 x double> %1
; Masked 7-field variant: the unmasked load's field 0 seeds all seven merge operands
; (copied into v8-v13) before the masked reload at v7 with v0.t. Trailing i32 1 is the
; policy operand.
2770 define <vscale x 1 x double> @test_vlsseg7_mask_nxv1f64(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
2771 ; CHECK-LABEL: test_vlsseg7_mask_nxv1f64:
2772 ; CHECK: # %bb.0: # %entry
2773 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
2774 ; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1
2775 ; CHECK-NEXT: vmv1r.v v8, v7
2776 ; CHECK-NEXT: vmv1r.v v9, v7
2777 ; CHECK-NEXT: vmv1r.v v10, v7
2778 ; CHECK-NEXT: vmv1r.v v11, v7
2779 ; CHECK-NEXT: vmv1r.v v12, v7
2780 ; CHECK-NEXT: vmv1r.v v13, v7
2781 ; CHECK-NEXT: vlsseg7e64.v v7, (a0), a1, v0.t
2784 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
2785 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
2786 %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2787 %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
2788 ret <vscale x 1 x double> %3
2791 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i32, i32)
2792 declare {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked 8-field strided segment load, nxv1f64: undef merge operands; field 1 is
; returned, so the group loads at v7 and field 1 lands in v8.
2794 define <vscale x 1 x double> @test_vlsseg8_nxv1f64(ptr %base, i32 %offset, i32 %vl) {
2795 ; CHECK-LABEL: test_vlsseg8_nxv1f64:
2796 ; CHECK: # %bb.0: # %entry
2797 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
2798 ; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1
2801 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
2802 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 1
2803 ret <vscale x 1 x double> %1
; Masked 8-field variant: the unmasked load's field 0 seeds all eight merge operands
; (copied into v8-v14) before the masked reload at v7 with v0.t. Trailing i32 1 is the
; policy operand.
2806 define <vscale x 1 x double> @test_vlsseg8_mask_nxv1f64(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
2807 ; CHECK-LABEL: test_vlsseg8_mask_nxv1f64:
2808 ; CHECK: # %bb.0: # %entry
2809 ; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, mu
2810 ; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1
2811 ; CHECK-NEXT: vmv1r.v v8, v7
2812 ; CHECK-NEXT: vmv1r.v v9, v7
2813 ; CHECK-NEXT: vmv1r.v v10, v7
2814 ; CHECK-NEXT: vmv1r.v v11, v7
2815 ; CHECK-NEXT: vmv1r.v v12, v7
2816 ; CHECK-NEXT: vmv1r.v v13, v7
2817 ; CHECK-NEXT: vmv1r.v v14, v7
2818 ; CHECK-NEXT: vlsseg8e64.v v7, (a0), a1, v0.t
2821 %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
2822 %1 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %0, 0
2823 %2 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.mask.nxv1f64(<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1,<vscale x 1 x double> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
2824 %3 = extractvalue {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} %2, 1
2825 ret <vscale x 1 x double> %3
2828 declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>, ptr, i32, i32)
2829 declare {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked 2-field strided segment load, nxv2f32 (e32, LMUL=1): undef merge operands;
; field 1 is returned, so the group loads at v7 and field 1 lands in v8.
2831 define <vscale x 2 x float> @test_vlsseg2_nxv2f32(ptr %base, i32 %offset, i32 %vl) {
2832 ; CHECK-LABEL: test_vlsseg2_nxv2f32:
2833 ; CHECK: # %bb.0: # %entry
2834 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
2835 ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
2838 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
2839 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
2840 ret <vscale x 2 x float> %1
; Masked 2-field variant (nxv2f32): the unmasked load's field 0 seeds both merge
; operands (copied into v8) before the masked reload at v7 with v0.t. Trailing i32 1
; is the policy operand.
2843 define <vscale x 2 x float> @test_vlsseg2_mask_nxv2f32(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
2844 ; CHECK-LABEL: test_vlsseg2_mask_nxv2f32:
2845 ; CHECK: # %bb.0: # %entry
2846 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
2847 ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
2848 ; CHECK-NEXT: vmv1r.v v8, v7
2849 ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t
2852 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
2853 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
2854 %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2855 %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
2856 ret <vscale x 2 x float> %3
2859 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i32, i32)
2860 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked 3-field strided segment load, nxv2f32: undef merge operands; field 1 is
; returned, so the group loads at v7 and field 1 lands in v8.
2862 define <vscale x 2 x float> @test_vlsseg3_nxv2f32(ptr %base, i32 %offset, i32 %vl) {
2863 ; CHECK-LABEL: test_vlsseg3_nxv2f32:
2864 ; CHECK: # %bb.0: # %entry
2865 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
2866 ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
2869 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
2870 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
2871 ret <vscale x 2 x float> %1
; Masked 3-field variant (nxv2f32): the unmasked load's field 0 seeds all three merge
; operands (copied into v8 and v9) before the masked reload at v7 with v0.t. Trailing
; i32 1 is the policy operand.
2874 define <vscale x 2 x float> @test_vlsseg3_mask_nxv2f32(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
2875 ; CHECK-LABEL: test_vlsseg3_mask_nxv2f32:
2876 ; CHECK: # %bb.0: # %entry
2877 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
2878 ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
2879 ; CHECK-NEXT: vmv1r.v v8, v7
2880 ; CHECK-NEXT: vmv1r.v v9, v7
2881 ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t
2884 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
2885 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
2886 %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2887 %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
2888 ret <vscale x 2 x float> %3
2891 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i32, i32)
2892 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked 4-field strided segment load, nxv2f32: undef merge operands; field 1 is
; returned, so the group loads at v7 and field 1 lands in v8.
2894 define <vscale x 2 x float> @test_vlsseg4_nxv2f32(ptr %base, i32 %offset, i32 %vl) {
2895 ; CHECK-LABEL: test_vlsseg4_nxv2f32:
2896 ; CHECK: # %bb.0: # %entry
2897 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
2898 ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
2901 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
2902 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
2903 ret <vscale x 2 x float> %1
; Masked 4-field variant (nxv2f32): the unmasked load's field 0 seeds all four merge
; operands (copied into v8-v10) before the masked reload at v7 with v0.t. Trailing
; i32 1 is the policy operand.
2906 define <vscale x 2 x float> @test_vlsseg4_mask_nxv2f32(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
2907 ; CHECK-LABEL: test_vlsseg4_mask_nxv2f32:
2908 ; CHECK: # %bb.0: # %entry
2909 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
2910 ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
2911 ; CHECK-NEXT: vmv1r.v v8, v7
2912 ; CHECK-NEXT: vmv1r.v v9, v7
2913 ; CHECK-NEXT: vmv1r.v v10, v7
2914 ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t
2917 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
2918 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
2919 %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2920 %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
2921 ret <vscale x 2 x float> %3
2924 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i32, i32)
2925 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked 5-field strided segment load, nxv2f32: undef merge operands; field 1 is
; returned, so the group loads at v7 and field 1 lands in v8.
2927 define <vscale x 2 x float> @test_vlsseg5_nxv2f32(ptr %base, i32 %offset, i32 %vl) {
2928 ; CHECK-LABEL: test_vlsseg5_nxv2f32:
2929 ; CHECK: # %bb.0: # %entry
2930 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
2931 ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
2934 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
2935 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
2936 ret <vscale x 2 x float> %1
; Masked 5-field variant (nxv2f32): the unmasked load's field 0 seeds all five merge
; operands (copied into v8-v11) before the masked reload at v7 with v0.t. Trailing
; i32 1 is the policy operand.
2939 define <vscale x 2 x float> @test_vlsseg5_mask_nxv2f32(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
2940 ; CHECK-LABEL: test_vlsseg5_mask_nxv2f32:
2941 ; CHECK: # %bb.0: # %entry
2942 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
2943 ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
2944 ; CHECK-NEXT: vmv1r.v v8, v7
2945 ; CHECK-NEXT: vmv1r.v v9, v7
2946 ; CHECK-NEXT: vmv1r.v v10, v7
2947 ; CHECK-NEXT: vmv1r.v v11, v7
2948 ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t
2951 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
2952 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
2953 %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2954 %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
2955 ret <vscale x 2 x float> %3
2958 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i32, i32)
2959 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked 6-field strided segment load, nxv2f32: undef merge operands; field 1 is
; returned, so the group loads at v7 and field 1 lands in v8.
2961 define <vscale x 2 x float> @test_vlsseg6_nxv2f32(ptr %base, i32 %offset, i32 %vl) {
2962 ; CHECK-LABEL: test_vlsseg6_nxv2f32:
2963 ; CHECK: # %bb.0: # %entry
2964 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
2965 ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
2968 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
2969 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
2970 ret <vscale x 2 x float> %1
; Masked 6-field variant (nxv2f32): the unmasked load's field 0 seeds all six merge
; operands (copied into v8-v12) before the masked reload at v7 with v0.t. Trailing
; i32 1 is the policy operand.
2973 define <vscale x 2 x float> @test_vlsseg6_mask_nxv2f32(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
2974 ; CHECK-LABEL: test_vlsseg6_mask_nxv2f32:
2975 ; CHECK: # %bb.0: # %entry
2976 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
2977 ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
2978 ; CHECK-NEXT: vmv1r.v v8, v7
2979 ; CHECK-NEXT: vmv1r.v v9, v7
2980 ; CHECK-NEXT: vmv1r.v v10, v7
2981 ; CHECK-NEXT: vmv1r.v v11, v7
2982 ; CHECK-NEXT: vmv1r.v v12, v7
2983 ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t
2986 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
2987 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
2988 %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
2989 %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
2990 ret <vscale x 2 x float> %3
2993 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i32, i32)
2994 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked strided segment load: vlsseg7 of 7 x nxv2f32 (all passthru operands undef);
; returns aggregate field 1. CHECK lines are autogenerated — regenerate, don't hand-edit.
2996 define <vscale x 2 x float> @test_vlsseg7_nxv2f32(ptr %base, i32 %offset, i32 %vl) {
2997 ; CHECK-LABEL: test_vlsseg7_nxv2f32:
2998 ; CHECK: # %bb.0: # %entry
2999 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
3000 ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
3003 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
3004 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
3005 ret <vscale x 2 x float> %1
; Masked vlsseg7 (nxv2f32): an unmasked load first produces the passthru (its field 0
; is tied into all 7 operands of the masked call), then the masked load runs under
; %mask (v0.t) with policy operand 1; field 1 of the masked result is returned.
3008 define <vscale x 2 x float> @test_vlsseg7_mask_nxv2f32(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
3009 ; CHECK-LABEL: test_vlsseg7_mask_nxv2f32:
3010 ; CHECK: # %bb.0: # %entry
3011 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
3012 ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
3013 ; CHECK-NEXT: vmv1r.v v8, v7
3014 ; CHECK-NEXT: vmv1r.v v9, v7
3015 ; CHECK-NEXT: vmv1r.v v10, v7
3016 ; CHECK-NEXT: vmv1r.v v11, v7
3017 ; CHECK-NEXT: vmv1r.v v12, v7
3018 ; CHECK-NEXT: vmv1r.v v13, v7
3019 ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t
3022 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
3023 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
3024 %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
3025 %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
3026 ret <vscale x 2 x float> %3
3029 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i32, i32)
3030 declare {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked strided segment load: vlsseg8 of 8 x nxv2f32 (all passthru operands undef);
; returns aggregate field 1.
3032 define <vscale x 2 x float> @test_vlsseg8_nxv2f32(ptr %base, i32 %offset, i32 %vl) {
3033 ; CHECK-LABEL: test_vlsseg8_nxv2f32:
3034 ; CHECK: # %bb.0: # %entry
3035 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
3036 ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
3039 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
3040 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 1
3041 ret <vscale x 2 x float> %1
; Masked vlsseg8 (nxv2f32): unmasked load builds the passthru (field 0 tied into all
; 8 operands), then the masked (v0.t) load with policy operand 1; returns field 1.
3044 define <vscale x 2 x float> @test_vlsseg8_mask_nxv2f32(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
3045 ; CHECK-LABEL: test_vlsseg8_mask_nxv2f32:
3046 ; CHECK: # %bb.0: # %entry
3047 ; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
3048 ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
3049 ; CHECK-NEXT: vmv1r.v v8, v7
3050 ; CHECK-NEXT: vmv1r.v v9, v7
3051 ; CHECK-NEXT: vmv1r.v v10, v7
3052 ; CHECK-NEXT: vmv1r.v v11, v7
3053 ; CHECK-NEXT: vmv1r.v v12, v7
3054 ; CHECK-NEXT: vmv1r.v v13, v7
3055 ; CHECK-NEXT: vmv1r.v v14, v7
3056 ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t
3059 %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
3060 %1 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %0, 0
3061 %2 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.mask.nxv2f32(<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1,<vscale x 2 x float> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
3062 %3 = extractvalue {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} %2, 1
3063 ret <vscale x 2 x float> %3
3066 declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>, ptr, i32, i32)
3067 declare {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked strided segment load: vlsseg2 of 2 x nxv1f16 (passthru undef);
; returns aggregate field 1.
3069 define <vscale x 1 x half> @test_vlsseg2_nxv1f16(ptr %base, i32 %offset, i32 %vl) {
3070 ; CHECK-LABEL: test_vlsseg2_nxv1f16:
3071 ; CHECK: # %bb.0: # %entry
3072 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
3073 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
3076 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
3077 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
3078 ret <vscale x 1 x half> %1
; Masked vlsseg2 (nxv1f16): unmasked load builds the passthru (field 0 tied into both
; operands), then the masked (v0.t) load with policy operand 1; returns field 1.
3081 define <vscale x 1 x half> @test_vlsseg2_mask_nxv1f16(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
3082 ; CHECK-LABEL: test_vlsseg2_mask_nxv1f16:
3083 ; CHECK: # %bb.0: # %entry
3084 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
3085 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
3086 ; CHECK-NEXT: vmv1r.v v8, v7
3087 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
3090 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
3091 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
3092 %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3093 %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
3094 ret <vscale x 1 x half> %3
3097 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i32, i32)
3098 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked strided segment load: vlsseg3 of 3 x nxv1f16 (passthru undef);
; returns aggregate field 1.
3100 define <vscale x 1 x half> @test_vlsseg3_nxv1f16(ptr %base, i32 %offset, i32 %vl) {
3101 ; CHECK-LABEL: test_vlsseg3_nxv1f16:
3102 ; CHECK: # %bb.0: # %entry
3103 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
3104 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
3107 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
3108 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
3109 ret <vscale x 1 x half> %1
; Masked vlsseg3 (nxv1f16): unmasked load builds the passthru (field 0 tied into all
; 3 operands), then the masked (v0.t) load with policy operand 1; returns field 1.
3112 define <vscale x 1 x half> @test_vlsseg3_mask_nxv1f16(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
3113 ; CHECK-LABEL: test_vlsseg3_mask_nxv1f16:
3114 ; CHECK: # %bb.0: # %entry
3115 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
3116 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
3117 ; CHECK-NEXT: vmv1r.v v8, v7
3118 ; CHECK-NEXT: vmv1r.v v9, v7
3119 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
3122 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
3123 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
3124 %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3125 %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
3126 ret <vscale x 1 x half> %3
3129 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i32, i32)
3130 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked strided segment load: vlsseg4 of 4 x nxv1f16 (passthru undef);
; returns aggregate field 1.
3132 define <vscale x 1 x half> @test_vlsseg4_nxv1f16(ptr %base, i32 %offset, i32 %vl) {
3133 ; CHECK-LABEL: test_vlsseg4_nxv1f16:
3134 ; CHECK: # %bb.0: # %entry
3135 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
3136 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
3139 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
3140 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
3141 ret <vscale x 1 x half> %1
; Masked vlsseg4 (nxv1f16): unmasked load builds the passthru (field 0 tied into all
; 4 operands), then the masked (v0.t) load with policy operand 1; returns field 1.
3144 define <vscale x 1 x half> @test_vlsseg4_mask_nxv1f16(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
3145 ; CHECK-LABEL: test_vlsseg4_mask_nxv1f16:
3146 ; CHECK: # %bb.0: # %entry
3147 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
3148 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
3149 ; CHECK-NEXT: vmv1r.v v8, v7
3150 ; CHECK-NEXT: vmv1r.v v9, v7
3151 ; CHECK-NEXT: vmv1r.v v10, v7
3152 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
3155 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
3156 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
3157 %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3158 %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
3159 ret <vscale x 1 x half> %3
3162 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i32, i32)
3163 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked strided segment load: vlsseg5 of 5 x nxv1f16 (passthru undef);
; returns aggregate field 1.
3165 define <vscale x 1 x half> @test_vlsseg5_nxv1f16(ptr %base, i32 %offset, i32 %vl) {
3166 ; CHECK-LABEL: test_vlsseg5_nxv1f16:
3167 ; CHECK: # %bb.0: # %entry
3168 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
3169 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
3172 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
3173 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
3174 ret <vscale x 1 x half> %1
; Masked vlsseg5 (nxv1f16): unmasked load builds the passthru (field 0 tied into all
; 5 operands), then the masked (v0.t) load with policy operand 1; returns field 1.
3177 define <vscale x 1 x half> @test_vlsseg5_mask_nxv1f16(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
3178 ; CHECK-LABEL: test_vlsseg5_mask_nxv1f16:
3179 ; CHECK: # %bb.0: # %entry
3180 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
3181 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
3182 ; CHECK-NEXT: vmv1r.v v8, v7
3183 ; CHECK-NEXT: vmv1r.v v9, v7
3184 ; CHECK-NEXT: vmv1r.v v10, v7
3185 ; CHECK-NEXT: vmv1r.v v11, v7
3186 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
3189 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
3190 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
3191 %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3192 %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
3193 ret <vscale x 1 x half> %3
3196 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i32, i32)
3197 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked strided segment load: vlsseg6 of 6 x nxv1f16 (passthru undef);
; returns aggregate field 1.
3199 define <vscale x 1 x half> @test_vlsseg6_nxv1f16(ptr %base, i32 %offset, i32 %vl) {
3200 ; CHECK-LABEL: test_vlsseg6_nxv1f16:
3201 ; CHECK: # %bb.0: # %entry
3202 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
3203 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
3206 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
3207 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
3208 ret <vscale x 1 x half> %1
; Masked vlsseg6 (nxv1f16): unmasked load builds the passthru (field 0 tied into all
; 6 operands), then the masked (v0.t) load with policy operand 1; returns field 1.
3211 define <vscale x 1 x half> @test_vlsseg6_mask_nxv1f16(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
3212 ; CHECK-LABEL: test_vlsseg6_mask_nxv1f16:
3213 ; CHECK: # %bb.0: # %entry
3214 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
3215 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
3216 ; CHECK-NEXT: vmv1r.v v8, v7
3217 ; CHECK-NEXT: vmv1r.v v9, v7
3218 ; CHECK-NEXT: vmv1r.v v10, v7
3219 ; CHECK-NEXT: vmv1r.v v11, v7
3220 ; CHECK-NEXT: vmv1r.v v12, v7
3221 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
3224 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
3225 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
3226 %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3227 %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
3228 ret <vscale x 1 x half> %3
3231 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i32, i32)
3232 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked strided segment load: vlsseg7 of 7 x nxv1f16 (passthru undef);
; returns aggregate field 1.
3234 define <vscale x 1 x half> @test_vlsseg7_nxv1f16(ptr %base, i32 %offset, i32 %vl) {
3235 ; CHECK-LABEL: test_vlsseg7_nxv1f16:
3236 ; CHECK: # %bb.0: # %entry
3237 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
3238 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
3241 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
3242 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
3243 ret <vscale x 1 x half> %1
; Masked vlsseg7 (nxv1f16): unmasked load builds the passthru (field 0 tied into all
; 7 operands), then the masked (v0.t) load with policy operand 1; returns field 1.
3246 define <vscale x 1 x half> @test_vlsseg7_mask_nxv1f16(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
3247 ; CHECK-LABEL: test_vlsseg7_mask_nxv1f16:
3248 ; CHECK: # %bb.0: # %entry
3249 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
3250 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
3251 ; CHECK-NEXT: vmv1r.v v8, v7
3252 ; CHECK-NEXT: vmv1r.v v9, v7
3253 ; CHECK-NEXT: vmv1r.v v10, v7
3254 ; CHECK-NEXT: vmv1r.v v11, v7
3255 ; CHECK-NEXT: vmv1r.v v12, v7
3256 ; CHECK-NEXT: vmv1r.v v13, v7
3257 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
3260 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
3261 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
3262 %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3263 %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
3264 ret <vscale x 1 x half> %3
3267 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i32, i32)
3268 declare {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked strided segment load: vlsseg8 of 8 x nxv1f16 (passthru undef);
; returns aggregate field 1.
3270 define <vscale x 1 x half> @test_vlsseg8_nxv1f16(ptr %base, i32 %offset, i32 %vl) {
3271 ; CHECK-LABEL: test_vlsseg8_nxv1f16:
3272 ; CHECK: # %bb.0: # %entry
3273 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
3274 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
3277 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
3278 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 1
3279 ret <vscale x 1 x half> %1
; Masked vlsseg8 (nxv1f16): unmasked load builds the passthru (field 0 tied into all
; 8 operands), then the masked (v0.t) load with policy operand 1; returns field 1.
3282 define <vscale x 1 x half> @test_vlsseg8_mask_nxv1f16(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
3283 ; CHECK-LABEL: test_vlsseg8_mask_nxv1f16:
3284 ; CHECK: # %bb.0: # %entry
3285 ; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
3286 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
3287 ; CHECK-NEXT: vmv1r.v v8, v7
3288 ; CHECK-NEXT: vmv1r.v v9, v7
3289 ; CHECK-NEXT: vmv1r.v v10, v7
3290 ; CHECK-NEXT: vmv1r.v v11, v7
3291 ; CHECK-NEXT: vmv1r.v v12, v7
3292 ; CHECK-NEXT: vmv1r.v v13, v7
3293 ; CHECK-NEXT: vmv1r.v v14, v7
3294 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
3297 %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
3298 %1 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %0, 0
3299 %2 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.mask.nxv1f16(<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1,<vscale x 1 x half> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3300 %3 = extractvalue {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} %2, 1
3301 ret <vscale x 1 x half> %3
3304 declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>, ptr, i32, i32)
3305 declare {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked strided segment load: vlsseg2 of 2 x nxv1f32 (passthru undef);
; returns aggregate field 1.
3307 define <vscale x 1 x float> @test_vlsseg2_nxv1f32(ptr %base, i32 %offset, i32 %vl) {
3308 ; CHECK-LABEL: test_vlsseg2_nxv1f32:
3309 ; CHECK: # %bb.0: # %entry
3310 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
3311 ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
3314 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
3315 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
3316 ret <vscale x 1 x float> %1
; Masked vlsseg2 (nxv1f32): unmasked load builds the passthru (field 0 tied into both
; operands), then the masked (v0.t) load with policy operand 1; returns field 1.
3319 define <vscale x 1 x float> @test_vlsseg2_mask_nxv1f32(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
3320 ; CHECK-LABEL: test_vlsseg2_mask_nxv1f32:
3321 ; CHECK: # %bb.0: # %entry
3322 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
3323 ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1
3324 ; CHECK-NEXT: vmv1r.v v8, v7
3325 ; CHECK-NEXT: vlsseg2e32.v v7, (a0), a1, v0.t
3328 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
3329 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
3330 %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3331 %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
3332 ret <vscale x 1 x float> %3
3335 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i32, i32)
3336 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked strided segment load: vlsseg3 of 3 x nxv1f32 (passthru undef);
; returns aggregate field 1.
3338 define <vscale x 1 x float> @test_vlsseg3_nxv1f32(ptr %base, i32 %offset, i32 %vl) {
3339 ; CHECK-LABEL: test_vlsseg3_nxv1f32:
3340 ; CHECK: # %bb.0: # %entry
3341 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
3342 ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
3345 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
3346 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
3347 ret <vscale x 1 x float> %1
; Masked vlsseg3 (nxv1f32): unmasked load builds the passthru (field 0 tied into all
; 3 operands), then the masked (v0.t) load with policy operand 1; returns field 1.
3350 define <vscale x 1 x float> @test_vlsseg3_mask_nxv1f32(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
3351 ; CHECK-LABEL: test_vlsseg3_mask_nxv1f32:
3352 ; CHECK: # %bb.0: # %entry
3353 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
3354 ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1
3355 ; CHECK-NEXT: vmv1r.v v8, v7
3356 ; CHECK-NEXT: vmv1r.v v9, v7
3357 ; CHECK-NEXT: vlsseg3e32.v v7, (a0), a1, v0.t
3360 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
3361 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
3362 %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3363 %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
3364 ret <vscale x 1 x float> %3
3367 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i32, i32)
3368 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked strided segment load: vlsseg4 of 4 x nxv1f32 (passthru undef);
; returns aggregate field 1.
3370 define <vscale x 1 x float> @test_vlsseg4_nxv1f32(ptr %base, i32 %offset, i32 %vl) {
3371 ; CHECK-LABEL: test_vlsseg4_nxv1f32:
3372 ; CHECK: # %bb.0: # %entry
3373 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
3374 ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
3377 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
3378 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
3379 ret <vscale x 1 x float> %1
; Masked vlsseg4 (nxv1f32): unmasked load builds the passthru (field 0 tied into all
; 4 operands), then the masked (v0.t) load with policy operand 1; returns field 1.
3382 define <vscale x 1 x float> @test_vlsseg4_mask_nxv1f32(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
3383 ; CHECK-LABEL: test_vlsseg4_mask_nxv1f32:
3384 ; CHECK: # %bb.0: # %entry
3385 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
3386 ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1
3387 ; CHECK-NEXT: vmv1r.v v8, v7
3388 ; CHECK-NEXT: vmv1r.v v9, v7
3389 ; CHECK-NEXT: vmv1r.v v10, v7
3390 ; CHECK-NEXT: vlsseg4e32.v v7, (a0), a1, v0.t
3393 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
3394 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
3395 %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3396 %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
3397 ret <vscale x 1 x float> %3
3400 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i32, i32)
3401 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked vlsseg5 (nxv1f32, e32/mf2): all passthru operands undef; the tuple
; load targets v7 so the returned field 1 is produced in v8.
3403 define <vscale x 1 x float> @test_vlsseg5_nxv1f32(ptr %base, i32 %offset, i32 %vl) {
3404 ; CHECK-LABEL: test_vlsseg5_nxv1f32:
3405 ; CHECK: # %bb.0: # %entry
3406 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
3407 ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
3410 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
3411 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
3412 ret <vscale x 1 x float> %1
; Masked vlsseg5 (nxv1f32): field 0 of an unmasked load seeds all five tied
; passthru registers (vmv1r.v v8-v11), then the masked (v0.t) load re-runs
; with policy operand i32 1; field 1 (v8) is returned.
3415 define <vscale x 1 x float> @test_vlsseg5_mask_nxv1f32(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
3416 ; CHECK-LABEL: test_vlsseg5_mask_nxv1f32:
3417 ; CHECK: # %bb.0: # %entry
3418 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
3419 ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1
3420 ; CHECK-NEXT: vmv1r.v v8, v7
3421 ; CHECK-NEXT: vmv1r.v v9, v7
3422 ; CHECK-NEXT: vmv1r.v v10, v7
3423 ; CHECK-NEXT: vmv1r.v v11, v7
3424 ; CHECK-NEXT: vlsseg5e32.v v7, (a0), a1, v0.t
3427 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
3428 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
3429 %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3430 %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
3431 ret <vscale x 1 x float> %3
3434 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i32, i32)
3435 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked vlsseg6 (nxv1f32, e32/mf2): all passthru operands undef; the tuple
; load targets v7 so the returned field 1 is produced in v8.
3437 define <vscale x 1 x float> @test_vlsseg6_nxv1f32(ptr %base, i32 %offset, i32 %vl) {
3438 ; CHECK-LABEL: test_vlsseg6_nxv1f32:
3439 ; CHECK: # %bb.0: # %entry
3440 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
3441 ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
3444 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
3445 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
3446 ret <vscale x 1 x float> %1
; Masked vlsseg6 (nxv1f32): field 0 of an unmasked load seeds all six tied
; passthru registers (vmv1r.v v8-v12), then the masked (v0.t) load re-runs
; with policy operand i32 1; field 1 (v8) is returned.
3449 define <vscale x 1 x float> @test_vlsseg6_mask_nxv1f32(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
3450 ; CHECK-LABEL: test_vlsseg6_mask_nxv1f32:
3451 ; CHECK: # %bb.0: # %entry
3452 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
3453 ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1
3454 ; CHECK-NEXT: vmv1r.v v8, v7
3455 ; CHECK-NEXT: vmv1r.v v9, v7
3456 ; CHECK-NEXT: vmv1r.v v10, v7
3457 ; CHECK-NEXT: vmv1r.v v11, v7
3458 ; CHECK-NEXT: vmv1r.v v12, v7
3459 ; CHECK-NEXT: vlsseg6e32.v v7, (a0), a1, v0.t
3462 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
3463 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
3464 %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3465 %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
3466 ret <vscale x 1 x float> %3
3469 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i32, i32)
3470 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked vlsseg7 (nxv1f32, e32/mf2): all passthru operands undef; the tuple
; load targets v7 so the returned field 1 is produced in v8.
3472 define <vscale x 1 x float> @test_vlsseg7_nxv1f32(ptr %base, i32 %offset, i32 %vl) {
3473 ; CHECK-LABEL: test_vlsseg7_nxv1f32:
3474 ; CHECK: # %bb.0: # %entry
3475 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
3476 ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
3479 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
3480 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
3481 ret <vscale x 1 x float> %1
; Masked vlsseg7 (nxv1f32): field 0 of an unmasked load seeds all seven tied
; passthru registers (vmv1r.v v8-v13), then the masked (v0.t) load re-runs
; with policy operand i32 1; field 1 (v8) is returned.
3484 define <vscale x 1 x float> @test_vlsseg7_mask_nxv1f32(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
3485 ; CHECK-LABEL: test_vlsseg7_mask_nxv1f32:
3486 ; CHECK: # %bb.0: # %entry
3487 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
3488 ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1
3489 ; CHECK-NEXT: vmv1r.v v8, v7
3490 ; CHECK-NEXT: vmv1r.v v9, v7
3491 ; CHECK-NEXT: vmv1r.v v10, v7
3492 ; CHECK-NEXT: vmv1r.v v11, v7
3493 ; CHECK-NEXT: vmv1r.v v12, v7
3494 ; CHECK-NEXT: vmv1r.v v13, v7
3495 ; CHECK-NEXT: vlsseg7e32.v v7, (a0), a1, v0.t
3498 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
3499 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
3500 %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3501 %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
3502 ret <vscale x 1 x float> %3
3505 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i32, i32)
3506 declare {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, ptr, i32, <vscale x 1 x i1>, i32, i32)
; Unmasked vlsseg8 (nxv1f32, e32/mf2): all passthru operands undef; the tuple
; load targets v7 so the returned field 1 is produced in v8.
3508 define <vscale x 1 x float> @test_vlsseg8_nxv1f32(ptr %base, i32 %offset, i32 %vl) {
3509 ; CHECK-LABEL: test_vlsseg8_nxv1f32:
3510 ; CHECK: # %bb.0: # %entry
3511 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
3512 ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
3515 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
3516 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 1
3517 ret <vscale x 1 x float> %1
; Masked vlsseg8 (nxv1f32): field 0 of an unmasked load seeds all eight tied
; passthru registers (vmv1r.v v8-v14), then the masked (v0.t) load re-runs
; with policy operand i32 1; field 1 (v8) is returned.
3520 define <vscale x 1 x float> @test_vlsseg8_mask_nxv1f32(ptr %base, i32 %offset, i32 %vl, <vscale x 1 x i1> %mask) {
3521 ; CHECK-LABEL: test_vlsseg8_mask_nxv1f32:
3522 ; CHECK: # %bb.0: # %entry
3523 ; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
3524 ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1
3525 ; CHECK-NEXT: vmv1r.v v8, v7
3526 ; CHECK-NEXT: vmv1r.v v9, v7
3527 ; CHECK-NEXT: vmv1r.v v10, v7
3528 ; CHECK-NEXT: vmv1r.v v11, v7
3529 ; CHECK-NEXT: vmv1r.v v12, v7
3530 ; CHECK-NEXT: vmv1r.v v13, v7
3531 ; CHECK-NEXT: vmv1r.v v14, v7
3532 ; CHECK-NEXT: vlsseg8e32.v v7, (a0), a1, v0.t
3535 %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
3536 %1 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %0, 0
3537 %2 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.mask.nxv1f32(<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1,<vscale x 1 x float> %1, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
3538 %3 = extractvalue {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} %2, 1
3539 ret <vscale x 1 x float> %3
3542 declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, ptr, i32, i32)
3543 declare {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>, ptr, i32, <vscale x 8 x i1>, i32, i32)
; Unmasked vlsseg2 (nxv8f16, e16/m2): passthru undef; the tuple load targets
; v6 so the returned field 1 is produced in v8 (m2 register group).
3545 define <vscale x 8 x half> @test_vlsseg2_nxv8f16(ptr %base, i32 %offset, i32 %vl) {
3546 ; CHECK-LABEL: test_vlsseg2_nxv8f16:
3547 ; CHECK: # %bb.0: # %entry
3548 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
3549 ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1
3552 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %offset, i32 %vl)
3553 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
3554 ret <vscale x 8 x half> %1
; Masked vlsseg2 (nxv8f16): field 0 of an unmasked load is copied (vmv2r.v)
; into the second tied passthru register group, then the masked (v0.t) load
; re-runs with policy operand i32 1; field 1 (v8) is returned.
3557 define <vscale x 8 x half> @test_vlsseg2_mask_nxv8f16(ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
3558 ; CHECK-LABEL: test_vlsseg2_mask_nxv8f16:
3559 ; CHECK: # %bb.0: # %entry
3560 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
3561 ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1
3562 ; CHECK-NEXT: vmv2r.v v8, v6
3563 ; CHECK-NEXT: vlsseg2e16.v v6, (a0), a1, v0.t
3566 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %offset, i32 %vl)
3567 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
3568 %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.mask.nxv8f16(<vscale x 8 x half> %1,<vscale x 8 x half> %1, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3569 %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
3570 ret <vscale x 8 x half> %3
3573 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, i32, i32)
3574 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, i32, <vscale x 8 x i1>, i32, i32)
; Unmasked vlsseg3 (nxv8f16, e16/m2): passthru undef; the tuple load targets
; v6 so the returned field 1 is produced in v8 (m2 register group).
3576 define <vscale x 8 x half> @test_vlsseg3_nxv8f16(ptr %base, i32 %offset, i32 %vl) {
3577 ; CHECK-LABEL: test_vlsseg3_nxv8f16:
3578 ; CHECK: # %bb.0: # %entry
3579 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
3580 ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1
3583 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %offset, i32 %vl)
3584 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
3585 ret <vscale x 8 x half> %1
; Masked vlsseg3 (nxv8f16): field 0 of an unmasked load seeds the tied
; passthru register groups (vmv2r.v v8, v10), then the masked (v0.t) load
; re-runs with policy operand i32 1; field 1 (v8) is returned.
3588 define <vscale x 8 x half> @test_vlsseg3_mask_nxv8f16(ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
3589 ; CHECK-LABEL: test_vlsseg3_mask_nxv8f16:
3590 ; CHECK: # %bb.0: # %entry
3591 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
3592 ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1
3593 ; CHECK-NEXT: vmv2r.v v8, v6
3594 ; CHECK-NEXT: vmv2r.v v10, v6
3595 ; CHECK-NEXT: vlsseg3e16.v v6, (a0), a1, v0.t
3598 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %offset, i32 %vl)
3599 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
3600 %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.mask.nxv8f16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3601 %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
3602 ret <vscale x 8 x half> %3
3605 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, i32, i32)
3606 declare {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, ptr, i32, <vscale x 8 x i1>, i32, i32)
; Unmasked vlsseg4 (nxv8f16, e16/m2): passthru undef; the tuple load targets
; v6 so the returned field 1 is produced in v8 (m2 register group).
3608 define <vscale x 8 x half> @test_vlsseg4_nxv8f16(ptr %base, i32 %offset, i32 %vl) {
3609 ; CHECK-LABEL: test_vlsseg4_nxv8f16:
3610 ; CHECK: # %bb.0: # %entry
3611 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
3612 ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1
3615 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %offset, i32 %vl)
3616 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 1
3617 ret <vscale x 8 x half> %1
; Masked vlsseg4 (nxv8f16): field 0 of an unmasked load seeds the tied
; passthru register groups (vmv2r.v v8, v10, v12), then the masked (v0.t)
; load re-runs with policy operand i32 1; field 1 (v8) is returned.
3620 define <vscale x 8 x half> @test_vlsseg4_mask_nxv8f16(ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
3621 ; CHECK-LABEL: test_vlsseg4_mask_nxv8f16:
3622 ; CHECK: # %bb.0: # %entry
3623 ; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, mu
3624 ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1
3625 ; CHECK-NEXT: vmv2r.v v8, v6
3626 ; CHECK-NEXT: vmv2r.v v10, v6
3627 ; CHECK-NEXT: vmv2r.v v12, v6
3628 ; CHECK-NEXT: vlsseg4e16.v v6, (a0), a1, v0.t
3631 %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %offset, i32 %vl)
3632 %1 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %0, 0
3633 %2 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.mask.nxv8f16(<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1,<vscale x 8 x half> %1, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3634 %3 = extractvalue {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} %2, 1
3635 ret <vscale x 8 x half> %3
3638 declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, ptr, i32, i32)
3639 declare {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.mask.nxv8f32(<vscale x 8 x float>,<vscale x 8 x float>, ptr, i32, <vscale x 8 x i1>, i32, i32)
; Unmasked vlsseg2 (nxv8f32, e32/m4): passthru undef; the tuple load targets
; v4 so the returned field 1 is produced in v8 (m4 register group).
3641 define <vscale x 8 x float> @test_vlsseg2_nxv8f32(ptr %base, i32 %offset, i32 %vl) {
3642 ; CHECK-LABEL: test_vlsseg2_nxv8f32:
3643 ; CHECK: # %bb.0: # %entry
3644 ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
3645 ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
3648 %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i32 %offset, i32 %vl)
3649 %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 1
3650 ret <vscale x 8 x float> %1
; Masked vlsseg2 (nxv8f32): field 0 of an unmasked load is copied (vmv4r.v)
; into the second tied passthru register group, then the masked (v0.t) load
; re-runs with policy operand i32 1; field 1 (v8) is returned.
3653 define <vscale x 8 x float> @test_vlsseg2_mask_nxv8f32(ptr %base, i32 %offset, i32 %vl, <vscale x 8 x i1> %mask) {
3654 ; CHECK-LABEL: test_vlsseg2_mask_nxv8f32:
3655 ; CHECK: # %bb.0: # %entry
3656 ; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, mu
3657 ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1
3658 ; CHECK-NEXT: vmv4r.v v8, v4
3659 ; CHECK-NEXT: vlsseg2e32.v v4, (a0), a1, v0.t
3662 %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i32 %offset, i32 %vl)
3663 %1 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %0, 0
3664 %2 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.mask.nxv8f32(<vscale x 8 x float> %1,<vscale x 8 x float> %1, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
3665 %3 = extractvalue {<vscale x 8 x float>,<vscale x 8 x float>} %2, 1
3666 ret <vscale x 8 x float> %3
3669 declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, ptr, i32, i32)
3670 declare {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked vlsseg2 (nxv2f64, e64/m2): passthru undef; the tuple load targets
; v6 so the returned field 1 is produced in v8 (m2 register group).
3672 define <vscale x 2 x double> @test_vlsseg2_nxv2f64(ptr %base, i32 %offset, i32 %vl) {
3673 ; CHECK-LABEL: test_vlsseg2_nxv2f64:
3674 ; CHECK: # %bb.0: # %entry
3675 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
3676 ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1
3679 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %offset, i32 %vl)
3680 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
3681 ret <vscale x 2 x double> %1
; Masked vlsseg2 (nxv2f64): field 0 of an unmasked load is copied (vmv2r.v)
; into the second tied passthru register group, then the masked (v0.t) load
; re-runs with policy operand i32 1; field 1 (v8) is returned.
3684 define <vscale x 2 x double> @test_vlsseg2_mask_nxv2f64(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
3685 ; CHECK-LABEL: test_vlsseg2_mask_nxv2f64:
3686 ; CHECK: # %bb.0: # %entry
3687 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
3688 ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1
3689 ; CHECK-NEXT: vmv2r.v v8, v6
3690 ; CHECK-NEXT: vlsseg2e64.v v6, (a0), a1, v0.t
3693 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %offset, i32 %vl)
3694 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
3695 %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.mask.nxv2f64(<vscale x 2 x double> %1,<vscale x 2 x double> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
3696 %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
3697 ret <vscale x 2 x double> %3
3700 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, i32, i32)
3701 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked vlsseg3 (nxv2f64, e64/m2): passthru undef; the tuple load targets
; v6 so the returned field 1 is produced in v8 (m2 register group).
3703 define <vscale x 2 x double> @test_vlsseg3_nxv2f64(ptr %base, i32 %offset, i32 %vl) {
3704 ; CHECK-LABEL: test_vlsseg3_nxv2f64:
3705 ; CHECK: # %bb.0: # %entry
3706 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
3707 ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1
3710 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %offset, i32 %vl)
3711 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
3712 ret <vscale x 2 x double> %1
; Masked vlsseg3 (nxv2f64): field 0 of an unmasked load seeds the tied
; passthru register groups (vmv2r.v v8, v10), then the masked (v0.t) load
; re-runs with policy operand i32 1; field 1 (v8) is returned.
3715 define <vscale x 2 x double> @test_vlsseg3_mask_nxv2f64(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
3716 ; CHECK-LABEL: test_vlsseg3_mask_nxv2f64:
3717 ; CHECK: # %bb.0: # %entry
3718 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
3719 ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1
3720 ; CHECK-NEXT: vmv2r.v v8, v6
3721 ; CHECK-NEXT: vmv2r.v v10, v6
3722 ; CHECK-NEXT: vlsseg3e64.v v6, (a0), a1, v0.t
3725 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %offset, i32 %vl)
3726 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
3727 %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.mask.nxv2f64(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
3728 %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
3729 ret <vscale x 2 x double> %3
3732 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, i32, i32)
3733 declare {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked vlsseg4 (nxv2f64, e64/m2): passthru undef; the tuple load targets
; v6 so the returned field 1 is produced in v8 (m2 register group).
3735 define <vscale x 2 x double> @test_vlsseg4_nxv2f64(ptr %base, i32 %offset, i32 %vl) {
3736 ; CHECK-LABEL: test_vlsseg4_nxv2f64:
3737 ; CHECK: # %bb.0: # %entry
3738 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
3739 ; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1
3742 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %offset, i32 %vl)
3743 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 1
3744 ret <vscale x 2 x double> %1
; Masked vlsseg4 (nxv2f64): field 0 of an unmasked load seeds the tied
; passthru register groups (vmv2r.v v8, v10, v12), then the masked (v0.t)
; load re-runs with policy operand i32 1; field 1 (v8) is returned.
3747 define <vscale x 2 x double> @test_vlsseg4_mask_nxv2f64(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
3748 ; CHECK-LABEL: test_vlsseg4_mask_nxv2f64:
3749 ; CHECK: # %bb.0: # %entry
3750 ; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, mu
3751 ; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1
3752 ; CHECK-NEXT: vmv2r.v v8, v6
3753 ; CHECK-NEXT: vmv2r.v v10, v6
3754 ; CHECK-NEXT: vmv2r.v v12, v6
3755 ; CHECK-NEXT: vlsseg4e64.v v6, (a0), a1, v0.t
3758 %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %offset, i32 %vl)
3759 %1 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %0, 0
3760 %2 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.mask.nxv2f64(<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1,<vscale x 2 x double> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
3761 %3 = extractvalue {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} %2, 1
3762 ret <vscale x 2 x double> %3
3765 declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, ptr, i32, i32)
3766 declare {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>, ptr, i32, <vscale x 4 x i1>, i32, i32)
; Unmasked vlsseg2 (nxv4f16, e16/m1): passthru undef; the tuple load targets
; v7 so the returned field 1 is produced in v8.
3768 define <vscale x 4 x half> @test_vlsseg2_nxv4f16(ptr %base, i32 %offset, i32 %vl) {
3769 ; CHECK-LABEL: test_vlsseg2_nxv4f16:
3770 ; CHECK: # %bb.0: # %entry
3771 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
3772 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
3775 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
3776 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
3777 ret <vscale x 4 x half> %1
; Masked vlsseg2 (nxv4f16): field 0 of an unmasked load is copied (vmv1r.v)
; into the second tied passthru register, then the masked (v0.t) load re-runs
; with policy operand i32 1; field 1 (v8) is returned.
3780 define <vscale x 4 x half> @test_vlsseg2_mask_nxv4f16(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
3781 ; CHECK-LABEL: test_vlsseg2_mask_nxv4f16:
3782 ; CHECK: # %bb.0: # %entry
3783 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
3784 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
3785 ; CHECK-NEXT: vmv1r.v v8, v7
3786 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
3789 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
3790 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
3791 %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
3792 %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
3793 ret <vscale x 4 x half> %3
3796 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i32, i32)
3797 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i32, <vscale x 4 x i1>, i32, i32)
; Unmasked vlsseg3 (nxv4f16, e16/m1): passthru undef; the tuple load targets
; v7 so the returned field 1 is produced in v8.
3799 define <vscale x 4 x half> @test_vlsseg3_nxv4f16(ptr %base, i32 %offset, i32 %vl) {
3800 ; CHECK-LABEL: test_vlsseg3_nxv4f16:
3801 ; CHECK: # %bb.0: # %entry
3802 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
3803 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
3806 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
3807 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
3808 ret <vscale x 4 x half> %1
; Masked vlsseg3 (nxv4f16): field 0 of an unmasked load seeds the tied
; passthru registers (vmv1r.v v8, v9), then the masked (v0.t) load re-runs
; with policy operand i32 1; field 1 (v8) is returned.
3811 define <vscale x 4 x half> @test_vlsseg3_mask_nxv4f16(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
3812 ; CHECK-LABEL: test_vlsseg3_mask_nxv4f16:
3813 ; CHECK: # %bb.0: # %entry
3814 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
3815 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
3816 ; CHECK-NEXT: vmv1r.v v8, v7
3817 ; CHECK-NEXT: vmv1r.v v9, v7
3818 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
3821 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
3822 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
3823 %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
3824 %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
3825 ret <vscale x 4 x half> %3
3828 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i32, i32)
3829 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i32, <vscale x 4 x i1>, i32, i32)
; Unmasked vlsseg4 (nxv4f16, e16/m1): passthru undef; the tuple load targets
; v7 so the returned field 1 is produced in v8.
3831 define <vscale x 4 x half> @test_vlsseg4_nxv4f16(ptr %base, i32 %offset, i32 %vl) {
3832 ; CHECK-LABEL: test_vlsseg4_nxv4f16:
3833 ; CHECK: # %bb.0: # %entry
3834 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
3835 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
3838 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
3839 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
3840 ret <vscale x 4 x half> %1
; Masked vlsseg4 (nxv4f16): segment 0 of an unmasked load seeds all four
; passthru operands of the masked intrinsic (trailing i32 1 is the policy
; operand); segment 1 of the masked result is returned.
3843 define <vscale x 4 x half> @test_vlsseg4_mask_nxv4f16(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
3844 ; CHECK-LABEL: test_vlsseg4_mask_nxv4f16:
3845 ; CHECK: # %bb.0: # %entry
3846 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
3847 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
3848 ; CHECK-NEXT: vmv1r.v v8, v7
3849 ; CHECK-NEXT: vmv1r.v v9, v7
3850 ; CHECK-NEXT: vmv1r.v v10, v7
3851 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
3854 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
3855 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
3856 %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
3857 %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
3858 ret <vscale x 4 x half> %3
3861 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i32, i32)
3862 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i32, <vscale x 4 x i1>, i32, i32)
; Unmasked vlsseg5 (nxv4f16) with all-undef passthru; returns segment 1.
3864 define <vscale x 4 x half> @test_vlsseg5_nxv4f16(ptr %base, i32 %offset, i32 %vl) {
3865 ; CHECK-LABEL: test_vlsseg5_nxv4f16:
3866 ; CHECK: # %bb.0: # %entry
3867 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
3868 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
3871 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
3872 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
3873 ret <vscale x 4 x half> %1
; Masked vlsseg5 (nxv4f16): segment 0 of an unmasked load seeds all five
; passthru operands of the masked intrinsic (trailing i32 1 is the policy
; operand); segment 1 of the masked result is returned.
3876 define <vscale x 4 x half> @test_vlsseg5_mask_nxv4f16(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
3877 ; CHECK-LABEL: test_vlsseg5_mask_nxv4f16:
3878 ; CHECK: # %bb.0: # %entry
3879 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
3880 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
3881 ; CHECK-NEXT: vmv1r.v v8, v7
3882 ; CHECK-NEXT: vmv1r.v v9, v7
3883 ; CHECK-NEXT: vmv1r.v v10, v7
3884 ; CHECK-NEXT: vmv1r.v v11, v7
3885 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
3888 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
3889 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
3890 %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
3891 %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
3892 ret <vscale x 4 x half> %3
3895 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i32, i32)
3896 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i32, <vscale x 4 x i1>, i32, i32)
; Unmasked vlsseg6 (nxv4f16) with all-undef passthru; returns segment 1.
3898 define <vscale x 4 x half> @test_vlsseg6_nxv4f16(ptr %base, i32 %offset, i32 %vl) {
3899 ; CHECK-LABEL: test_vlsseg6_nxv4f16:
3900 ; CHECK: # %bb.0: # %entry
3901 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
3902 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
3905 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
3906 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
3907 ret <vscale x 4 x half> %1
; Masked vlsseg6 (nxv4f16): segment 0 of an unmasked load seeds all six
; passthru operands of the masked intrinsic (trailing i32 1 is the policy
; operand); segment 1 of the masked result is returned.
3910 define <vscale x 4 x half> @test_vlsseg6_mask_nxv4f16(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
3911 ; CHECK-LABEL: test_vlsseg6_mask_nxv4f16:
3912 ; CHECK: # %bb.0: # %entry
3913 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
3914 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
3915 ; CHECK-NEXT: vmv1r.v v8, v7
3916 ; CHECK-NEXT: vmv1r.v v9, v7
3917 ; CHECK-NEXT: vmv1r.v v10, v7
3918 ; CHECK-NEXT: vmv1r.v v11, v7
3919 ; CHECK-NEXT: vmv1r.v v12, v7
3920 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
3923 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
3924 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
3925 %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
3926 %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
3927 ret <vscale x 4 x half> %3
3930 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i32, i32)
3931 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i32, <vscale x 4 x i1>, i32, i32)
; Unmasked vlsseg7 (nxv4f16) with all-undef passthru; returns segment 1.
3933 define <vscale x 4 x half> @test_vlsseg7_nxv4f16(ptr %base, i32 %offset, i32 %vl) {
3934 ; CHECK-LABEL: test_vlsseg7_nxv4f16:
3935 ; CHECK: # %bb.0: # %entry
3936 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
3937 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
3940 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
3941 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
3942 ret <vscale x 4 x half> %1
; Masked vlsseg7 (nxv4f16): segment 0 of an unmasked load seeds all seven
; passthru operands of the masked intrinsic (trailing i32 1 is the policy
; operand); segment 1 of the masked result is returned.
3945 define <vscale x 4 x half> @test_vlsseg7_mask_nxv4f16(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
3946 ; CHECK-LABEL: test_vlsseg7_mask_nxv4f16:
3947 ; CHECK: # %bb.0: # %entry
3948 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
3949 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
3950 ; CHECK-NEXT: vmv1r.v v8, v7
3951 ; CHECK-NEXT: vmv1r.v v9, v7
3952 ; CHECK-NEXT: vmv1r.v v10, v7
3953 ; CHECK-NEXT: vmv1r.v v11, v7
3954 ; CHECK-NEXT: vmv1r.v v12, v7
3955 ; CHECK-NEXT: vmv1r.v v13, v7
3956 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
3959 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
3960 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
3961 %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
3962 %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
3963 ret <vscale x 4 x half> %3
3966 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i32, i32)
3967 declare {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, ptr, i32, <vscale x 4 x i1>, i32, i32)
; Unmasked vlsseg8 (nxv4f16) with all-undef passthru; returns segment 1.
3969 define <vscale x 4 x half> @test_vlsseg8_nxv4f16(ptr %base, i32 %offset, i32 %vl) {
3970 ; CHECK-LABEL: test_vlsseg8_nxv4f16:
3971 ; CHECK: # %bb.0: # %entry
3972 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
3973 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
3976 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
3977 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 1
3978 ret <vscale x 4 x half> %1
; Masked vlsseg8 (nxv4f16): segment 0 of an unmasked load seeds all eight
; passthru operands of the masked intrinsic (trailing i32 1 is the policy
; operand); segment 1 of the masked result is returned.
3981 define <vscale x 4 x half> @test_vlsseg8_mask_nxv4f16(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
3982 ; CHECK-LABEL: test_vlsseg8_mask_nxv4f16:
3983 ; CHECK: # %bb.0: # %entry
3984 ; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, mu
3985 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
3986 ; CHECK-NEXT: vmv1r.v v8, v7
3987 ; CHECK-NEXT: vmv1r.v v9, v7
3988 ; CHECK-NEXT: vmv1r.v v10, v7
3989 ; CHECK-NEXT: vmv1r.v v11, v7
3990 ; CHECK-NEXT: vmv1r.v v12, v7
3991 ; CHECK-NEXT: vmv1r.v v13, v7
3992 ; CHECK-NEXT: vmv1r.v v14, v7
3993 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
3996 %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
3997 %1 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %0, 0
3998 %2 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.mask.nxv4f16(<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1,<vscale x 4 x half> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
3999 %3 = extractvalue {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} %2, 1
4000 ret <vscale x 4 x half> %3
4003 declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, ptr, i32, i32)
4004 declare {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked vlsseg2 (nxv2f16) with all-undef passthru; returns segment 1.
4006 define <vscale x 2 x half> @test_vlsseg2_nxv2f16(ptr %base, i32 %offset, i32 %vl) {
4007 ; CHECK-LABEL: test_vlsseg2_nxv2f16:
4008 ; CHECK: # %bb.0: # %entry
4009 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
4010 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
4013 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
4014 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
4015 ret <vscale x 2 x half> %1
; Masked vlsseg2 (nxv2f16): segment 0 of an unmasked load seeds both
; passthru operands of the masked intrinsic (trailing i32 1 is the policy
; operand); segment 1 of the masked result is returned.
4018 define <vscale x 2 x half> @test_vlsseg2_mask_nxv2f16(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
4019 ; CHECK-LABEL: test_vlsseg2_mask_nxv2f16:
4020 ; CHECK: # %bb.0: # %entry
4021 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
4022 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1
4023 ; CHECK-NEXT: vmv1r.v v8, v7
4024 ; CHECK-NEXT: vlsseg2e16.v v7, (a0), a1, v0.t
4027 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
4028 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
4029 %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
4030 %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
4031 ret <vscale x 2 x half> %3
4034 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i32, i32)
4035 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked vlsseg3 (nxv2f16) with all-undef passthru; returns segment 1.
4037 define <vscale x 2 x half> @test_vlsseg3_nxv2f16(ptr %base, i32 %offset, i32 %vl) {
4038 ; CHECK-LABEL: test_vlsseg3_nxv2f16:
4039 ; CHECK: # %bb.0: # %entry
4040 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
4041 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
4044 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
4045 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
4046 ret <vscale x 2 x half> %1
; Masked vlsseg3 (nxv2f16): segment 0 of an unmasked load seeds all three
; passthru operands of the masked intrinsic (trailing i32 1 is the policy
; operand); segment 1 of the masked result is returned.
4049 define <vscale x 2 x half> @test_vlsseg3_mask_nxv2f16(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
4050 ; CHECK-LABEL: test_vlsseg3_mask_nxv2f16:
4051 ; CHECK: # %bb.0: # %entry
4052 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
4053 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1
4054 ; CHECK-NEXT: vmv1r.v v8, v7
4055 ; CHECK-NEXT: vmv1r.v v9, v7
4056 ; CHECK-NEXT: vlsseg3e16.v v7, (a0), a1, v0.t
4059 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
4060 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
4061 %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
4062 %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
4063 ret <vscale x 2 x half> %3
4066 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i32, i32)
4067 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked vlsseg4 (nxv2f16) with all-undef passthru; returns segment 1.
4069 define <vscale x 2 x half> @test_vlsseg4_nxv2f16(ptr %base, i32 %offset, i32 %vl) {
4070 ; CHECK-LABEL: test_vlsseg4_nxv2f16:
4071 ; CHECK: # %bb.0: # %entry
4072 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
4073 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
4076 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
4077 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
4078 ret <vscale x 2 x half> %1
; Masked vlsseg4 (nxv2f16): segment 0 of an unmasked load seeds all four
; passthru operands of the masked intrinsic (trailing i32 1 is the policy
; operand); segment 1 of the masked result is returned.
4081 define <vscale x 2 x half> @test_vlsseg4_mask_nxv2f16(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
4082 ; CHECK-LABEL: test_vlsseg4_mask_nxv2f16:
4083 ; CHECK: # %bb.0: # %entry
4084 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
4085 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1
4086 ; CHECK-NEXT: vmv1r.v v8, v7
4087 ; CHECK-NEXT: vmv1r.v v9, v7
4088 ; CHECK-NEXT: vmv1r.v v10, v7
4089 ; CHECK-NEXT: vlsseg4e16.v v7, (a0), a1, v0.t
4092 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
4093 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
4094 %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
4095 %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
4096 ret <vscale x 2 x half> %3
4099 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i32, i32)
4100 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked vlsseg5 (nxv2f16) with all-undef passthru; returns segment 1.
4102 define <vscale x 2 x half> @test_vlsseg5_nxv2f16(ptr %base, i32 %offset, i32 %vl) {
4103 ; CHECK-LABEL: test_vlsseg5_nxv2f16:
4104 ; CHECK: # %bb.0: # %entry
4105 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
4106 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
4109 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
4110 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
4111 ret <vscale x 2 x half> %1
; Masked vlsseg5 (nxv2f16): segment 0 of an unmasked load seeds all five
; passthru operands of the masked intrinsic (trailing i32 1 is the policy
; operand); segment 1 of the masked result is returned.
4114 define <vscale x 2 x half> @test_vlsseg5_mask_nxv2f16(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
4115 ; CHECK-LABEL: test_vlsseg5_mask_nxv2f16:
4116 ; CHECK: # %bb.0: # %entry
4117 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
4118 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1
4119 ; CHECK-NEXT: vmv1r.v v8, v7
4120 ; CHECK-NEXT: vmv1r.v v9, v7
4121 ; CHECK-NEXT: vmv1r.v v10, v7
4122 ; CHECK-NEXT: vmv1r.v v11, v7
4123 ; CHECK-NEXT: vlsseg5e16.v v7, (a0), a1, v0.t
4126 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
4127 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
4128 %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
4129 %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
4130 ret <vscale x 2 x half> %3
4133 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i32, i32)
4134 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked vlsseg6 (nxv2f16) with all-undef passthru; returns segment 1.
4136 define <vscale x 2 x half> @test_vlsseg6_nxv2f16(ptr %base, i32 %offset, i32 %vl) {
4137 ; CHECK-LABEL: test_vlsseg6_nxv2f16:
4138 ; CHECK: # %bb.0: # %entry
4139 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
4140 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
4143 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
4144 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
4145 ret <vscale x 2 x half> %1
; Masked vlsseg6 (nxv2f16): segment 0 of an unmasked load seeds all six
; passthru operands of the masked intrinsic (trailing i32 1 is the policy
; operand); segment 1 of the masked result is returned.
4148 define <vscale x 2 x half> @test_vlsseg6_mask_nxv2f16(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
4149 ; CHECK-LABEL: test_vlsseg6_mask_nxv2f16:
4150 ; CHECK: # %bb.0: # %entry
4151 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
4152 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1
4153 ; CHECK-NEXT: vmv1r.v v8, v7
4154 ; CHECK-NEXT: vmv1r.v v9, v7
4155 ; CHECK-NEXT: vmv1r.v v10, v7
4156 ; CHECK-NEXT: vmv1r.v v11, v7
4157 ; CHECK-NEXT: vmv1r.v v12, v7
4158 ; CHECK-NEXT: vlsseg6e16.v v7, (a0), a1, v0.t
4161 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
4162 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
4163 %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
4164 %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
4165 ret <vscale x 2 x half> %3
4168 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i32, i32)
4169 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked vlsseg7 (nxv2f16) with all-undef passthru; returns segment 1.
4171 define <vscale x 2 x half> @test_vlsseg7_nxv2f16(ptr %base, i32 %offset, i32 %vl) {
4172 ; CHECK-LABEL: test_vlsseg7_nxv2f16:
4173 ; CHECK: # %bb.0: # %entry
4174 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
4175 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
4178 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
4179 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
4180 ret <vscale x 2 x half> %1
; Masked vlsseg7 (nxv2f16): segment 0 of an unmasked load seeds all seven
; passthru operands of the masked intrinsic (trailing i32 1 is the policy
; operand); segment 1 of the masked result is returned.
4183 define <vscale x 2 x half> @test_vlsseg7_mask_nxv2f16(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
4184 ; CHECK-LABEL: test_vlsseg7_mask_nxv2f16:
4185 ; CHECK: # %bb.0: # %entry
4186 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
4187 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1
4188 ; CHECK-NEXT: vmv1r.v v8, v7
4189 ; CHECK-NEXT: vmv1r.v v9, v7
4190 ; CHECK-NEXT: vmv1r.v v10, v7
4191 ; CHECK-NEXT: vmv1r.v v11, v7
4192 ; CHECK-NEXT: vmv1r.v v12, v7
4193 ; CHECK-NEXT: vmv1r.v v13, v7
4194 ; CHECK-NEXT: vlsseg7e16.v v7, (a0), a1, v0.t
4197 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
4198 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
4199 %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
4200 %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
4201 ret <vscale x 2 x half> %3
4204 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i32, i32)
4205 declare {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, ptr, i32, <vscale x 2 x i1>, i32, i32)
; Unmasked vlsseg8 (nxv2f16) with all-undef passthru; returns segment 1.
4207 define <vscale x 2 x half> @test_vlsseg8_nxv2f16(ptr %base, i32 %offset, i32 %vl) {
4208 ; CHECK-LABEL: test_vlsseg8_nxv2f16:
4209 ; CHECK: # %bb.0: # %entry
4210 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
4211 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
4214 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
4215 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 1
4216 ret <vscale x 2 x half> %1
; Masked vlsseg8 (nxv2f16): segment 0 of an unmasked load seeds all eight
; passthru operands of the masked intrinsic (trailing i32 1 is the policy
; operand); segment 1 of the masked result is returned.
4219 define <vscale x 2 x half> @test_vlsseg8_mask_nxv2f16(ptr %base, i32 %offset, i32 %vl, <vscale x 2 x i1> %mask) {
4220 ; CHECK-LABEL: test_vlsseg8_mask_nxv2f16:
4221 ; CHECK: # %bb.0: # %entry
4222 ; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
4223 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1
4224 ; CHECK-NEXT: vmv1r.v v8, v7
4225 ; CHECK-NEXT: vmv1r.v v9, v7
4226 ; CHECK-NEXT: vmv1r.v v10, v7
4227 ; CHECK-NEXT: vmv1r.v v11, v7
4228 ; CHECK-NEXT: vmv1r.v v12, v7
4229 ; CHECK-NEXT: vmv1r.v v13, v7
4230 ; CHECK-NEXT: vmv1r.v v14, v7
4231 ; CHECK-NEXT: vlsseg8e16.v v7, (a0), a1, v0.t
4234 %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
4235 %1 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %0, 0
4236 %2 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.mask.nxv2f16(<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1,<vscale x 2 x half> %1, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
4237 %3 = extractvalue {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} %2, 1
4238 ret <vscale x 2 x half> %3
4241 declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, ptr, i32, i32)
4242 declare {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>, ptr, i32, <vscale x 4 x i1>, i32, i32)
; Unmasked 2-field strided segment load of nxv4f32 (LMUL=2) with undef
; passthru; field 1 of the result is returned. Autogenerated CHECK lines;
; do not hand-edit.
4244 define <vscale x 4 x float> @test_vlsseg2_nxv4f32(ptr %base, i32 %offset, i32 %vl) {
4245 ; CHECK-LABEL: test_vlsseg2_nxv4f32:
4246 ; CHECK: # %bb.0: # %entry
4247 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
4248 ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1
4251 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %offset, i32 %vl)
4252 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
4253 ret <vscale x 4 x float> %1
; Masked 2-field strided segment load of nxv4f32. Field 0 from an unmasked
; load (%1) seeds both passthru operands of the masked load (the vmv2r copy
; of v6 into v8 in the CHECK lines). Trailing i32 1 is the policy operand
; (tail-agnostic per the RVV intrinsics convention; vsetvli uses "ta, mu").
; Autogenerated CHECK lines; do not hand-edit.
4256 define <vscale x 4 x float> @test_vlsseg2_mask_nxv4f32(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
4257 ; CHECK-LABEL: test_vlsseg2_mask_nxv4f32:
4258 ; CHECK: # %bb.0: # %entry
4259 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
4260 ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1
4261 ; CHECK-NEXT: vmv2r.v v8, v6
4262 ; CHECK-NEXT: vlsseg2e32.v v6, (a0), a1, v0.t
4265 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %offset, i32 %vl)
4266 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
4267 %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4268 %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
4269 ret <vscale x 4 x float> %3
4272 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, i32, i32)
4273 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, i32, <vscale x 4 x i1>, i32, i32)
; Unmasked 3-field strided segment load of nxv4f32 (LMUL=2) with undef
; passthru; field 1 of the result is returned. Autogenerated CHECK lines;
; do not hand-edit.
4275 define <vscale x 4 x float> @test_vlsseg3_nxv4f32(ptr %base, i32 %offset, i32 %vl) {
4276 ; CHECK-LABEL: test_vlsseg3_nxv4f32:
4277 ; CHECK: # %bb.0: # %entry
4278 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
4279 ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1
4282 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %offset, i32 %vl)
4283 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
4284 ret <vscale x 4 x float> %1
; Masked 3-field strided segment load of nxv4f32. Field 0 from an unmasked
; load (%1) seeds all three passthru operands of the masked load (the two
; vmv2r copies of v6 into v8/v10 in the CHECK lines). Trailing i32 1 is the
; policy operand (tail-agnostic per the RVV intrinsics convention; vsetvli
; uses "ta, mu"). Autogenerated CHECK lines; do not hand-edit.
4287 define <vscale x 4 x float> @test_vlsseg3_mask_nxv4f32(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
4288 ; CHECK-LABEL: test_vlsseg3_mask_nxv4f32:
4289 ; CHECK: # %bb.0: # %entry
4290 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
4291 ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1
4292 ; CHECK-NEXT: vmv2r.v v8, v6
4293 ; CHECK-NEXT: vmv2r.v v10, v6
4294 ; CHECK-NEXT: vlsseg3e32.v v6, (a0), a1, v0.t
4297 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %offset, i32 %vl)
4298 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
4299 %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4300 %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
4301 ret <vscale x 4 x float> %3
4304 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, i32, i32)
4305 declare {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, ptr, i32, <vscale x 4 x i1>, i32, i32)
; Unmasked 4-field strided segment load of nxv4f32 (LMUL=2) with undef
; passthru; field 1 of the result is returned. Autogenerated CHECK lines;
; do not hand-edit.
4307 define <vscale x 4 x float> @test_vlsseg4_nxv4f32(ptr %base, i32 %offset, i32 %vl) {
4308 ; CHECK-LABEL: test_vlsseg4_nxv4f32:
4309 ; CHECK: # %bb.0: # %entry
4310 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
4311 ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1
4314 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %offset, i32 %vl)
4315 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 1
4316 ret <vscale x 4 x float> %1
; Masked 4-field strided segment load of nxv4f32. Field 0 from an unmasked
; load (%1) seeds all four passthru operands of the masked load (the vmv2r
; copies of v6 into v8/v10/v12 in the CHECK lines). Trailing i32 1 is the
; policy operand (tail-agnostic per the RVV intrinsics convention; vsetvli
; uses "ta, mu"). Autogenerated CHECK lines; do not hand-edit.
4319 define <vscale x 4 x float> @test_vlsseg4_mask_nxv4f32(ptr %base, i32 %offset, i32 %vl, <vscale x 4 x i1> %mask) {
4320 ; CHECK-LABEL: test_vlsseg4_mask_nxv4f32:
4321 ; CHECK: # %bb.0: # %entry
4322 ; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, mu
4323 ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1
4324 ; CHECK-NEXT: vmv2r.v v8, v6
4325 ; CHECK-NEXT: vmv2r.v v10, v6
4326 ; CHECK-NEXT: vmv2r.v v12, v6
4327 ; CHECK-NEXT: vlsseg4e32.v v6, (a0), a1, v0.t
4330 %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %offset, i32 %vl)
4331 %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %0, 0
4332 %2 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.mask.nxv4f32(<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1,<vscale x 4 x float> %1, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
4333 %3 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} %2, 1
4334 ret <vscale x 4 x float> %3