; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -mattr=+m -O0 < %s \
; RUN:   | FileCheck --check-prefix=SPILL-O0 %s
; RUN: llc -mtriple=riscv64 -mattr=+v -mattr=+m -O2 < %s \
; RUN:   | FileCheck --check-prefix=SPILL-O2 %s
; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -O2 < %s \
; RUN:   | FileCheck --check-prefix=SPILL-O2-VLEN128 %s
; Spill/reload of a two-field segment-load result (LMUL=1/2) across an inline
; asm that clobbers all vector registers.
define <vscale x 1 x i32> @spill_zvlsseg_nxv1i32(ptr %base, i64 %vl) nounwind {
; SPILL-O0-LABEL: spill_zvlsseg_nxv1i32:
; SPILL-O0: # %bb.0: # %entry
; SPILL-O0-NEXT: addi sp, sp, -16
; SPILL-O0-NEXT: csrr a2, vlenb
; SPILL-O0-NEXT: slli a2, a2, 1
; SPILL-O0-NEXT: sub sp, sp, a2
; SPILL-O0-NEXT: # implicit-def: $v8_v9
; SPILL-O0-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
; SPILL-O0-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O0-NEXT: vmv1r.v v8, v9
; SPILL-O0-NEXT: addi a0, sp, 16
; SPILL-O0-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O0-NEXT: #APP
; SPILL-O0-NEXT: #NO_APP
; SPILL-O0-NEXT: addi a0, sp, 16
; SPILL-O0-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O0-NEXT: csrr a0, vlenb
; SPILL-O0-NEXT: slli a0, a0, 1
; SPILL-O0-NEXT: add sp, sp, a0
; SPILL-O0-NEXT: addi sp, sp, 16
; SPILL-O0-NEXT: ret
;
; SPILL-O2-LABEL: spill_zvlsseg_nxv1i32:
; SPILL-O2: # %bb.0: # %entry
; SPILL-O2-NEXT: addi sp, sp, -16
; SPILL-O2-NEXT: csrr a2, vlenb
; SPILL-O2-NEXT: slli a2, a2, 1
; SPILL-O2-NEXT: sub sp, sp, a2
; SPILL-O2-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; SPILL-O2-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: addi sp, sp, 16
; SPILL-O2-NEXT: ret
;
; SPILL-O2-VLEN128-LABEL: spill_zvlsseg_nxv1i32:
; SPILL-O2-VLEN128: # %bb.0: # %entry
; SPILL-O2-VLEN128-NEXT: addi sp, sp, -16
; SPILL-O2-VLEN128-NEXT: addi sp, sp, -32
; SPILL-O2-VLEN128-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; SPILL-O2-VLEN128-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 16
; SPILL-O2-VLEN128-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 16
; SPILL-O2-VLEN128-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 32
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
entry:
  %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
  call void asm sideeffect "",
  "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %1 = extractvalue {<vscale x 1 x i32>,<vscale x 1 x i32>} %0, 1
  ret <vscale x 1 x i32> %1
}
; Same scenario at LMUL=1: segment pair {v8,v9} must survive the
; all-vector-clobbering inline asm via stack spill/reload.
define <vscale x 2 x i32> @spill_zvlsseg_nxv2i32(ptr %base, i64 %vl) nounwind {
; SPILL-O0-LABEL: spill_zvlsseg_nxv2i32:
; SPILL-O0: # %bb.0: # %entry
; SPILL-O0-NEXT: addi sp, sp, -16
; SPILL-O0-NEXT: csrr a2, vlenb
; SPILL-O0-NEXT: slli a2, a2, 1
; SPILL-O0-NEXT: sub sp, sp, a2
; SPILL-O0-NEXT: # implicit-def: $v8_v9
; SPILL-O0-NEXT: vsetvli zero, a1, e32, m1, tu, ma
; SPILL-O0-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O0-NEXT: vmv1r.v v8, v9
; SPILL-O0-NEXT: addi a0, sp, 16
; SPILL-O0-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O0-NEXT: #APP
; SPILL-O0-NEXT: #NO_APP
; SPILL-O0-NEXT: addi a0, sp, 16
; SPILL-O0-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O0-NEXT: csrr a0, vlenb
; SPILL-O0-NEXT: slli a0, a0, 1
; SPILL-O0-NEXT: add sp, sp, a0
; SPILL-O0-NEXT: addi sp, sp, 16
; SPILL-O0-NEXT: ret
;
; SPILL-O2-LABEL: spill_zvlsseg_nxv2i32:
; SPILL-O2: # %bb.0: # %entry
; SPILL-O2-NEXT: addi sp, sp, -16
; SPILL-O2-NEXT: csrr a2, vlenb
; SPILL-O2-NEXT: slli a2, a2, 1
; SPILL-O2-NEXT: sub sp, sp, a2
; SPILL-O2-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; SPILL-O2-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: addi sp, sp, 16
; SPILL-O2-NEXT: ret
;
; SPILL-O2-VLEN128-LABEL: spill_zvlsseg_nxv2i32:
; SPILL-O2-VLEN128: # %bb.0: # %entry
; SPILL-O2-VLEN128-NEXT: addi sp, sp, -16
; SPILL-O2-VLEN128-NEXT: addi sp, sp, -32
; SPILL-O2-VLEN128-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; SPILL-O2-VLEN128-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 16
; SPILL-O2-VLEN128-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 16
; SPILL-O2-VLEN128-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 32
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
entry:
  %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
  call void asm sideeffect "",
  "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %1 = extractvalue {<vscale x 2 x i32>,<vscale x 2 x i32>} %0, 1
  ret <vscale x 2 x i32> %1
}
; LMUL=2 variant: segment pair {v8m2,v10m2} spilled with vs2r/vl2r; stack
; size scales accordingly (2*vlenb per register group).
define <vscale x 4 x i32> @spill_zvlsseg_nxv4i32(ptr %base, i64 %vl) nounwind {
; SPILL-O0-LABEL: spill_zvlsseg_nxv4i32:
; SPILL-O0: # %bb.0: # %entry
; SPILL-O0-NEXT: addi sp, sp, -16
; SPILL-O0-NEXT: csrr a2, vlenb
; SPILL-O0-NEXT: slli a2, a2, 1
; SPILL-O0-NEXT: sub sp, sp, a2
; SPILL-O0-NEXT: # implicit-def: $v8m2_v10m2
; SPILL-O0-NEXT: vsetvli zero, a1, e32, m2, tu, ma
; SPILL-O0-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O0-NEXT: vmv2r.v v8, v10
; SPILL-O0-NEXT: addi a0, sp, 16
; SPILL-O0-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O0-NEXT: #APP
; SPILL-O0-NEXT: #NO_APP
; SPILL-O0-NEXT: addi a0, sp, 16
; SPILL-O0-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O0-NEXT: csrr a0, vlenb
; SPILL-O0-NEXT: slli a0, a0, 1
; SPILL-O0-NEXT: add sp, sp, a0
; SPILL-O0-NEXT: addi sp, sp, 16
; SPILL-O0-NEXT: ret
;
; SPILL-O2-LABEL: spill_zvlsseg_nxv4i32:
; SPILL-O2: # %bb.0: # %entry
; SPILL-O2-NEXT: addi sp, sp, -16
; SPILL-O2-NEXT: csrr a2, vlenb
; SPILL-O2-NEXT: slli a2, a2, 2
; SPILL-O2-NEXT: sub sp, sp, a2
; SPILL-O2-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; SPILL-O2-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 1
; SPILL-O2-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 1
; SPILL-O2-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 2
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: addi sp, sp, 16
; SPILL-O2-NEXT: ret
;
; SPILL-O2-VLEN128-LABEL: spill_zvlsseg_nxv4i32:
; SPILL-O2-VLEN128: # %bb.0: # %entry
; SPILL-O2-VLEN128-NEXT: addi sp, sp, -16
; SPILL-O2-VLEN128-NEXT: addi sp, sp, -64
; SPILL-O2-VLEN128-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; SPILL-O2-VLEN128-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 32
; SPILL-O2-VLEN128-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 32
; SPILL-O2-VLEN128-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 64
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %vl)
  call void asm sideeffect "",
  "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
; LMUL=4 variant: segment pair {v8m4,v12m4} spilled with vs4r/vl4r; stack
; size scales to 4*vlenb per register group.
define <vscale x 8 x i32> @spill_zvlsseg_nxv8i32(ptr %base, i64 %vl) nounwind {
; SPILL-O0-LABEL: spill_zvlsseg_nxv8i32:
; SPILL-O0: # %bb.0: # %entry
; SPILL-O0-NEXT: addi sp, sp, -16
; SPILL-O0-NEXT: csrr a2, vlenb
; SPILL-O0-NEXT: slli a2, a2, 2
; SPILL-O0-NEXT: sub sp, sp, a2
; SPILL-O0-NEXT: # implicit-def: $v8m4_v12m4
; SPILL-O0-NEXT: vsetvli zero, a1, e32, m4, tu, ma
; SPILL-O0-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O0-NEXT: vmv4r.v v8, v12
; SPILL-O0-NEXT: addi a0, sp, 16
; SPILL-O0-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O0-NEXT: #APP
; SPILL-O0-NEXT: #NO_APP
; SPILL-O0-NEXT: addi a0, sp, 16
; SPILL-O0-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O0-NEXT: csrr a0, vlenb
; SPILL-O0-NEXT: slli a0, a0, 2
; SPILL-O0-NEXT: add sp, sp, a0
; SPILL-O0-NEXT: addi sp, sp, 16
; SPILL-O0-NEXT: ret
;
; SPILL-O2-LABEL: spill_zvlsseg_nxv8i32:
; SPILL-O2: # %bb.0: # %entry
; SPILL-O2-NEXT: addi sp, sp, -16
; SPILL-O2-NEXT: csrr a2, vlenb
; SPILL-O2-NEXT: slli a2, a2, 3
; SPILL-O2-NEXT: sub sp, sp, a2
; SPILL-O2-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; SPILL-O2-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 2
; SPILL-O2-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vs4r.v v12, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 2
; SPILL-O2-NEXT: vl4r.v v4, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 3
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: addi sp, sp, 16
; SPILL-O2-NEXT: ret
;
; SPILL-O2-VLEN128-LABEL: spill_zvlsseg_nxv8i32:
; SPILL-O2-VLEN128: # %bb.0: # %entry
; SPILL-O2-VLEN128-NEXT: addi sp, sp, -16
; SPILL-O2-VLEN128-NEXT: addi sp, sp, -128
; SPILL-O2-VLEN128-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; SPILL-O2-VLEN128-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 64
; SPILL-O2-VLEN128-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vs4r.v v12, (a0) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 64
; SPILL-O2-VLEN128-NEXT: vl4r.v v4, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 128
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
entry:
  %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i64 %vl)
  call void asm sideeffect "",
  "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %1 = extractvalue {<vscale x 8 x i32>,<vscale x 8 x i32>} %0, 1
  ret <vscale x 8 x i32> %1
}
; Three-field segment load at LMUL=2: O2 needs 3 * 2*vlenb of spill space,
; hence the "li a3, 6 / mul" stack-size computation.
define <vscale x 4 x i32> @spill_zvlsseg3_nxv4i32(ptr %base, i64 %vl) nounwind {
; SPILL-O0-LABEL: spill_zvlsseg3_nxv4i32:
; SPILL-O0: # %bb.0: # %entry
; SPILL-O0-NEXT: addi sp, sp, -16
; SPILL-O0-NEXT: csrr a2, vlenb
; SPILL-O0-NEXT: slli a2, a2, 1
; SPILL-O0-NEXT: sub sp, sp, a2
; SPILL-O0-NEXT: # implicit-def: $v8m2_v10m2_v12m2
; SPILL-O0-NEXT: vsetvli zero, a1, e32, m2, tu, ma
; SPILL-O0-NEXT: vlseg3e32.v v8, (a0)
; SPILL-O0-NEXT: vmv2r.v v8, v10
; SPILL-O0-NEXT: addi a0, sp, 16
; SPILL-O0-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O0-NEXT: #APP
; SPILL-O0-NEXT: #NO_APP
; SPILL-O0-NEXT: addi a0, sp, 16
; SPILL-O0-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O0-NEXT: csrr a0, vlenb
; SPILL-O0-NEXT: slli a0, a0, 1
; SPILL-O0-NEXT: add sp, sp, a0
; SPILL-O0-NEXT: addi sp, sp, 16
; SPILL-O0-NEXT: ret
;
; SPILL-O2-LABEL: spill_zvlsseg3_nxv4i32:
; SPILL-O2: # %bb.0: # %entry
; SPILL-O2-NEXT: addi sp, sp, -16
; SPILL-O2-NEXT: csrr a2, vlenb
; SPILL-O2-NEXT: li a3, 6
; SPILL-O2-NEXT: mul a2, a2, a3
; SPILL-O2-NEXT: sub sp, sp, a2
; SPILL-O2-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; SPILL-O2-NEXT: vlseg3e32.v v8, (a0)
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 1
; SPILL-O2-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vs2r.v v12, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 1
; SPILL-O2-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: li a1, 6
; SPILL-O2-NEXT: mul a0, a0, a1
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: addi sp, sp, 16
; SPILL-O2-NEXT: ret
;
; SPILL-O2-VLEN128-LABEL: spill_zvlsseg3_nxv4i32:
; SPILL-O2-VLEN128: # %bb.0: # %entry
; SPILL-O2-VLEN128-NEXT: addi sp, sp, -16
; SPILL-O2-VLEN128-NEXT: addi sp, sp, -96
; SPILL-O2-VLEN128-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; SPILL-O2-VLEN128-NEXT: vlseg3e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 32
; SPILL-O2-VLEN128-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vs2r.v v12, (a0) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 32
; SPILL-O2-VLEN128-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 96
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
entry:
  %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %vl)
  call void asm sideeffect "",
  "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} %0, 1
  ret <vscale x 4 x i32> %1
}
; Declarations of the RISC-V segment-load intrinsics used by the tests above.
declare {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i32>, ptr , i64)
declare {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i32>, ptr , i64)
declare {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>, ptr , i64)
declare {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i32>, ptr , i64)
declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, ptr , i64)