; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -mattr=+m -O0 < %s \
; RUN:    | FileCheck --check-prefix=SPILL-O0 %s
; RUN: llc -mtriple=riscv64 -mattr=+v -mattr=+m -O2 < %s \
; RUN:    | FileCheck --check-prefix=SPILL-O2 %s
; Spill/reload of a vlseg2 (mf2) tuple across an inline asm that clobbers all
; vector registers; checks the whole-register spill code at -O0 and -O2.
define <vscale x 1 x i32> @spill_zvlsseg_nxv1i32(ptr %base, i64 %vl) nounwind {
; SPILL-O0-LABEL: spill_zvlsseg_nxv1i32:
; SPILL-O0:       # %bb.0: # %entry
; SPILL-O0-NEXT:    addi sp, sp, -16
; SPILL-O0-NEXT:    csrr a2, vlenb
; SPILL-O0-NEXT:    slli a2, a2, 1
; SPILL-O0-NEXT:    sub sp, sp, a2
; SPILL-O0-NEXT:    # implicit-def: $v8
; SPILL-O0-NEXT:    # implicit-def: $v9
; SPILL-O0-NEXT:    # implicit-def: $v10
; SPILL-O0-NEXT:    # implicit-def: $v9
; SPILL-O0-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
; SPILL-O0-NEXT:    vmv1r.v v9, v10
; SPILL-O0-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; SPILL-O0-NEXT:    vlseg2e32.v v8, (a0)
; SPILL-O0-NEXT:    vmv1r.v v8, v9
; SPILL-O0-NEXT:    addi a0, sp, 16
; SPILL-O0-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O0-NEXT:    #APP
; SPILL-O0-NEXT:    #NO_APP
; SPILL-O0-NEXT:    addi a0, sp, 16
; SPILL-O0-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O0-NEXT:    csrr a0, vlenb
; SPILL-O0-NEXT:    slli a0, a0, 1
; SPILL-O0-NEXT:    add sp, sp, a0
; SPILL-O0-NEXT:    addi sp, sp, 16
; SPILL-O0-NEXT:    ret
;
; SPILL-O2-LABEL: spill_zvlsseg_nxv1i32:
; SPILL-O2:       # %bb.0: # %entry
; SPILL-O2-NEXT:    addi sp, sp, -16
; SPILL-O2-NEXT:    csrr a2, vlenb
; SPILL-O2-NEXT:    slli a2, a2, 1
; SPILL-O2-NEXT:    sub sp, sp, a2
; SPILL-O2-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
; SPILL-O2-NEXT:    vlseg2e32.v v8, (a0)
; SPILL-O2-NEXT:    addi a0, sp, 16
; SPILL-O2-NEXT:    csrr a1, vlenb
; SPILL-O2-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT:    add a0, a0, a1
; SPILL-O2-NEXT:    vs1r.v v9, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT:    #APP
; SPILL-O2-NEXT:    #NO_APP
; SPILL-O2-NEXT:    addi a0, sp, 16
; SPILL-O2-NEXT:    vl1r.v v7, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT:    add a0, a0, a1
; SPILL-O2-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT:    csrr a0, vlenb
; SPILL-O2-NEXT:    slli a0, a0, 1
; SPILL-O2-NEXT:    add sp, sp, a0
; SPILL-O2-NEXT:    addi sp, sp, 16
; SPILL-O2-NEXT:    ret
entry:
  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
  call void asm sideeffect "",
  "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
  ret <vscale x 1 x i32> %1
}
; Same spill/reload pattern as above at LMUL=1 (e32, m1).
define <vscale x 2 x i32> @spill_zvlsseg_nxv2i32(ptr %base, i64 %vl) nounwind {
; SPILL-O0-LABEL: spill_zvlsseg_nxv2i32:
; SPILL-O0:       # %bb.0: # %entry
; SPILL-O0-NEXT:    addi sp, sp, -16
; SPILL-O0-NEXT:    csrr a2, vlenb
; SPILL-O0-NEXT:    slli a2, a2, 1
; SPILL-O0-NEXT:    sub sp, sp, a2
; SPILL-O0-NEXT:    # implicit-def: $v8
; SPILL-O0-NEXT:    # implicit-def: $v9
; SPILL-O0-NEXT:    # implicit-def: $v10
; SPILL-O0-NEXT:    # implicit-def: $v9
; SPILL-O0-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
; SPILL-O0-NEXT:    vmv1r.v v9, v10
; SPILL-O0-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; SPILL-O0-NEXT:    vlseg2e32.v v8, (a0)
; SPILL-O0-NEXT:    vmv1r.v v8, v9
; SPILL-O0-NEXT:    addi a0, sp, 16
; SPILL-O0-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O0-NEXT:    #APP
; SPILL-O0-NEXT:    #NO_APP
; SPILL-O0-NEXT:    addi a0, sp, 16
; SPILL-O0-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O0-NEXT:    csrr a0, vlenb
; SPILL-O0-NEXT:    slli a0, a0, 1
; SPILL-O0-NEXT:    add sp, sp, a0
; SPILL-O0-NEXT:    addi sp, sp, 16
; SPILL-O0-NEXT:    ret
;
; SPILL-O2-LABEL: spill_zvlsseg_nxv2i32:
; SPILL-O2:       # %bb.0: # %entry
; SPILL-O2-NEXT:    addi sp, sp, -16
; SPILL-O2-NEXT:    csrr a2, vlenb
; SPILL-O2-NEXT:    slli a2, a2, 1
; SPILL-O2-NEXT:    sub sp, sp, a2
; SPILL-O2-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; SPILL-O2-NEXT:    vlseg2e32.v v8, (a0)
; SPILL-O2-NEXT:    addi a0, sp, 16
; SPILL-O2-NEXT:    csrr a1, vlenb
; SPILL-O2-NEXT:    vs1r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT:    add a0, a0, a1
; SPILL-O2-NEXT:    vs1r.v v9, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT:    #APP
; SPILL-O2-NEXT:    #NO_APP
; SPILL-O2-NEXT:    addi a0, sp, 16
; SPILL-O2-NEXT:    vl1r.v v7, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT:    add a0, a0, a1
; SPILL-O2-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT:    csrr a0, vlenb
; SPILL-O2-NEXT:    slli a0, a0, 1
; SPILL-O2-NEXT:    add sp, sp, a0
; SPILL-O2-NEXT:    addi sp, sp, 16
; SPILL-O2-NEXT:    ret
entry:
  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
  call void asm sideeffect "",
  "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
  ret <vscale x 2 x i32> %1
}
; Same spill/reload pattern at LMUL=2 (e32, m2); spill slots are vs2r/vl2r.
define <vscale x 4 x i32> @spill_zvlsseg_nxv4i32(ptr %base, i64 %vl) nounwind {
; SPILL-O0-LABEL: spill_zvlsseg_nxv4i32:
; SPILL-O0:       # %bb.0: # %entry
; SPILL-O0-NEXT:    addi sp, sp, -16
; SPILL-O0-NEXT:    csrr a2, vlenb
; SPILL-O0-NEXT:    slli a2, a2, 1
; SPILL-O0-NEXT:    sub sp, sp, a2
; SPILL-O0-NEXT:    # implicit-def: $v8m2
; SPILL-O0-NEXT:    # implicit-def: $v10m2
; SPILL-O0-NEXT:    # implicit-def: $v12m2
; SPILL-O0-NEXT:    # implicit-def: $v10m2
; SPILL-O0-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
; SPILL-O0-NEXT:    vmv2r.v v10, v12
; SPILL-O0-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; SPILL-O0-NEXT:    vlseg2e32.v v8, (a0)
; SPILL-O0-NEXT:    vmv2r.v v8, v10
; SPILL-O0-NEXT:    addi a0, sp, 16
; SPILL-O0-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O0-NEXT:    #APP
; SPILL-O0-NEXT:    #NO_APP
; SPILL-O0-NEXT:    addi a0, sp, 16
; SPILL-O0-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O0-NEXT:    csrr a0, vlenb
; SPILL-O0-NEXT:    slli a0, a0, 1
; SPILL-O0-NEXT:    add sp, sp, a0
; SPILL-O0-NEXT:    addi sp, sp, 16
; SPILL-O0-NEXT:    ret
;
; SPILL-O2-LABEL: spill_zvlsseg_nxv4i32:
; SPILL-O2:       # %bb.0: # %entry
; SPILL-O2-NEXT:    addi sp, sp, -16
; SPILL-O2-NEXT:    csrr a2, vlenb
; SPILL-O2-NEXT:    slli a2, a2, 2
; SPILL-O2-NEXT:    sub sp, sp, a2
; SPILL-O2-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; SPILL-O2-NEXT:    vlseg2e32.v v8, (a0)
; SPILL-O2-NEXT:    addi a0, sp, 16
; SPILL-O2-NEXT:    csrr a1, vlenb
; SPILL-O2-NEXT:    slli a1, a1, 1
; SPILL-O2-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT:    add a0, a0, a1
; SPILL-O2-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT:    #APP
; SPILL-O2-NEXT:    #NO_APP
; SPILL-O2-NEXT:    addi a0, sp, 16
; SPILL-O2-NEXT:    csrr a1, vlenb
; SPILL-O2-NEXT:    slli a1, a1, 1
; SPILL-O2-NEXT:    vl2r.v v6, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT:    add a0, a0, a1
; SPILL-O2-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT:    csrr a0, vlenb
; SPILL-O2-NEXT:    slli a0, a0, 2
; SPILL-O2-NEXT:    add sp, sp, a0
; SPILL-O2-NEXT:    addi sp, sp, 16
; SPILL-O2-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %vl)
  call void asm sideeffect "",
  "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
; Same spill/reload pattern at LMUL=4 (e32, m4); spill slots are vs4r/vl4r.
define <vscale x 8 x i32> @spill_zvlsseg_nxv8i32(ptr %base, i64 %vl) nounwind {
; SPILL-O0-LABEL: spill_zvlsseg_nxv8i32:
; SPILL-O0:       # %bb.0: # %entry
; SPILL-O0-NEXT:    addi sp, sp, -16
; SPILL-O0-NEXT:    csrr a2, vlenb
; SPILL-O0-NEXT:    slli a2, a2, 2
; SPILL-O0-NEXT:    sub sp, sp, a2
; SPILL-O0-NEXT:    # implicit-def: $v8m4
; SPILL-O0-NEXT:    # implicit-def: $v12m4
; SPILL-O0-NEXT:    # implicit-def: $v16m4
; SPILL-O0-NEXT:    # implicit-def: $v12m4
; SPILL-O0-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; SPILL-O0-NEXT:    vmv4r.v v12, v16
; SPILL-O0-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; SPILL-O0-NEXT:    vlseg2e32.v v8, (a0)
; SPILL-O0-NEXT:    vmv4r.v v8, v12
; SPILL-O0-NEXT:    addi a0, sp, 16
; SPILL-O0-NEXT:    vs4r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O0-NEXT:    #APP
; SPILL-O0-NEXT:    #NO_APP
; SPILL-O0-NEXT:    addi a0, sp, 16
; SPILL-O0-NEXT:    vl4r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O0-NEXT:    csrr a0, vlenb
; SPILL-O0-NEXT:    slli a0, a0, 2
; SPILL-O0-NEXT:    add sp, sp, a0
; SPILL-O0-NEXT:    addi sp, sp, 16
; SPILL-O0-NEXT:    ret
;
; SPILL-O2-LABEL: spill_zvlsseg_nxv8i32:
; SPILL-O2:       # %bb.0: # %entry
; SPILL-O2-NEXT:    addi sp, sp, -16
; SPILL-O2-NEXT:    csrr a2, vlenb
; SPILL-O2-NEXT:    slli a2, a2, 3
; SPILL-O2-NEXT:    sub sp, sp, a2
; SPILL-O2-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
; SPILL-O2-NEXT:    vlseg2e32.v v8, (a0)
; SPILL-O2-NEXT:    addi a0, sp, 16
; SPILL-O2-NEXT:    csrr a1, vlenb
; SPILL-O2-NEXT:    slli a1, a1, 2
; SPILL-O2-NEXT:    vs4r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT:    add a0, a0, a1
; SPILL-O2-NEXT:    vs4r.v v12, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT:    #APP
; SPILL-O2-NEXT:    #NO_APP
; SPILL-O2-NEXT:    addi a0, sp, 16
; SPILL-O2-NEXT:    csrr a1, vlenb
; SPILL-O2-NEXT:    slli a1, a1, 2
; SPILL-O2-NEXT:    vl4r.v v4, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT:    add a0, a0, a1
; SPILL-O2-NEXT:    vl4r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT:    csrr a0, vlenb
; SPILL-O2-NEXT:    slli a0, a0, 3
; SPILL-O2-NEXT:    add sp, sp, a0
; SPILL-O2-NEXT:    addi sp, sp, 16
; SPILL-O2-NEXT:    ret
entry:
  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i64 %vl)
  call void asm sideeffect "",
  "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
  ret <vscale x 8 x i32> %1
}
; Three-field segment load (vlseg3, e32 m2): -O2 stack size is 6*vlenb, i.e.
; three m2 spill slots of 2*vlenb each.
define <vscale x 4 x i32> @spill_zvlsseg3_nxv4i32(ptr %base, i64 %vl) nounwind {
; SPILL-O0-LABEL: spill_zvlsseg3_nxv4i32:
; SPILL-O0:       # %bb.0: # %entry
; SPILL-O0-NEXT:    addi sp, sp, -16
; SPILL-O0-NEXT:    csrr a2, vlenb
; SPILL-O0-NEXT:    slli a2, a2, 1
; SPILL-O0-NEXT:    sub sp, sp, a2
; SPILL-O0-NEXT:    # implicit-def: $v8m2
; SPILL-O0-NEXT:    # implicit-def: $v10m2
; SPILL-O0-NEXT:    # implicit-def: $v16m2
; SPILL-O0-NEXT:    # implicit-def: $v10m2
; SPILL-O0-NEXT:    # implicit-def: $v14m2
; SPILL-O0-NEXT:    # implicit-def: $v10m2
; SPILL-O0-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
; SPILL-O0-NEXT:    vmv2r.v v10, v16
; SPILL-O0-NEXT:    vmv2r.v v12, v14
; SPILL-O0-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; SPILL-O0-NEXT:    vlseg3e32.v v8, (a0)
; SPILL-O0-NEXT:    vmv2r.v v8, v10
; SPILL-O0-NEXT:    addi a0, sp, 16
; SPILL-O0-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O0-NEXT:    #APP
; SPILL-O0-NEXT:    #NO_APP
; SPILL-O0-NEXT:    addi a0, sp, 16
; SPILL-O0-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O0-NEXT:    csrr a0, vlenb
; SPILL-O0-NEXT:    slli a0, a0, 1
; SPILL-O0-NEXT:    add sp, sp, a0
; SPILL-O0-NEXT:    addi sp, sp, 16
; SPILL-O0-NEXT:    ret
;
; SPILL-O2-LABEL: spill_zvlsseg3_nxv4i32:
; SPILL-O2:       # %bb.0: # %entry
; SPILL-O2-NEXT:    addi sp, sp, -16
; SPILL-O2-NEXT:    csrr a2, vlenb
; SPILL-O2-NEXT:    li a3, 6
; SPILL-O2-NEXT:    mul a2, a2, a3
; SPILL-O2-NEXT:    sub sp, sp, a2
; SPILL-O2-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
; SPILL-O2-NEXT:    vlseg3e32.v v8, (a0)
; SPILL-O2-NEXT:    addi a0, sp, 16
; SPILL-O2-NEXT:    csrr a1, vlenb
; SPILL-O2-NEXT:    slli a1, a1, 1
; SPILL-O2-NEXT:    vs2r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT:    add a0, a0, a1
; SPILL-O2-NEXT:    vs2r.v v10, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT:    add a0, a0, a1
; SPILL-O2-NEXT:    vs2r.v v12, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT:    #APP
; SPILL-O2-NEXT:    #NO_APP
; SPILL-O2-NEXT:    addi a0, sp, 16
; SPILL-O2-NEXT:    csrr a1, vlenb
; SPILL-O2-NEXT:    slli a1, a1, 1
; SPILL-O2-NEXT:    vl2r.v v6, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT:    add a0, a0, a1
; SPILL-O2-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT:    add a0, a0, a1
; SPILL-O2-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT:    csrr a0, vlenb
; SPILL-O2-NEXT:    li a1, 6
; SPILL-O2-NEXT:    mul a0, a0, a1
; SPILL-O2-NEXT:    add sp, sp, a0
; SPILL-O2-NEXT:    addi sp, sp, 16
; SPILL-O2-NEXT:    ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %vl)
  call void asm sideeffect "",
  "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
; Declarations of the segment-load intrinsics exercised above.
declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i64)
declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr , i64)
declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr , i64)
declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr , i64)
declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr , i64)