; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -mattr=+m -O0 < %s \
; RUN:     | FileCheck --check-prefix=SPILL-O0 %s
; RUN: llc -mtriple=riscv64 -mattr=+v -mattr=+m -O2 < %s \
; RUN:     | FileCheck --check-prefix=SPILL-O2 %s
; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-max=128 -O2 < %s \
; RUN:     | FileCheck --check-prefix=SPILL-O2-VLEN128 %s
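
; Each function below loads a segment tuple with vlseg{2,3}e32, clobbers all
; 32 vector registers with inline asm so the tuple must be spilled across the
; asm, and returns one extracted field. The three RUN configurations cover
; -O0, -O2, and -O2 with -riscv-v-vector-bits-max=128; in the last one vlenb
; is the constant 16, so stack adjustments and spill-slot offsets become
; immediates instead of vlenb-scaled arithmetic.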
define <vscale x 1 x i32> @spill_zvlsseg_nxv1i32(ptr %base, i64 %vl) nounwind {
; SPILL-O0-LABEL: spill_zvlsseg_nxv1i32:
; SPILL-O0: # %bb.0: # %entry
; SPILL-O0-NEXT: addi sp, sp, -16
; SPILL-O0-NEXT: csrr a2, vlenb
; SPILL-O0-NEXT: sub sp, sp, a2
; SPILL-O0-NEXT: # implicit-def: $v8_v9
; SPILL-O0-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
; SPILL-O0-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O0-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
; SPILL-O0-NEXT: vmv1r.v v8, v9
; SPILL-O0-NEXT: addi a0, sp, 16
; SPILL-O0-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O0-NEXT: #APP
; SPILL-O0-NEXT: #NO_APP
; SPILL-O0-NEXT: addi a0, sp, 16
; SPILL-O0-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O0-NEXT: csrr a0, vlenb
; SPILL-O0-NEXT: add sp, sp, a0
; SPILL-O0-NEXT: addi sp, sp, 16
; SPILL-O0-NEXT: ret
;
; SPILL-O2-LABEL: spill_zvlsseg_nxv1i32:
; SPILL-O2: # %bb.0: # %entry
; SPILL-O2-NEXT: addi sp, sp, -16
; SPILL-O2-NEXT: csrr a2, vlenb
; SPILL-O2-NEXT: slli a2, a2, 1
; SPILL-O2-NEXT: sub sp, sp, a2
; SPILL-O2-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; SPILL-O2-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: addi sp, sp, 16
; SPILL-O2-NEXT: ret
;
; SPILL-O2-VLEN128-LABEL: spill_zvlsseg_nxv1i32:
; SPILL-O2-VLEN128: # %bb.0: # %entry
; SPILL-O2-VLEN128-NEXT: addi sp, sp, -16
; SPILL-O2-VLEN128-NEXT: addi sp, sp, -32
; SPILL-O2-VLEN128-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; SPILL-O2-VLEN128-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 16
; SPILL-O2-VLEN128-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 16
; SPILL-O2-VLEN128-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 32
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
entry:
  %0 = tail call target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) undef, ptr %base, i64 %vl, i64 5)
  call void asm sideeffect "",
  "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %1 = call <vscale x 1 x i32> @llvm.riscv.tuple.extract.nxv1i32.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2) %0, i32 1)
  ret <vscale x 1 x i32> %1
}
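
; Same two-field pattern at LMUL=1 (e32, m1); each field still occupies a
; single vector register, so vs1r.v/vl1r.v handle the spill and reload.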
define <vscale x 2 x i32> @spill_zvlsseg_nxv2i32(ptr %base, i64 %vl) nounwind {
; SPILL-O0-LABEL: spill_zvlsseg_nxv2i32:
; SPILL-O0: # %bb.0: # %entry
; SPILL-O0-NEXT: addi sp, sp, -16
; SPILL-O0-NEXT: csrr a2, vlenb
; SPILL-O0-NEXT: sub sp, sp, a2
; SPILL-O0-NEXT: # implicit-def: $v8_v9
; SPILL-O0-NEXT: vsetvli zero, a1, e32, m1, tu, ma
; SPILL-O0-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O0-NEXT: vsetvli zero, a1, e32, m1, tu, ma
; SPILL-O0-NEXT: vmv1r.v v8, v9
; SPILL-O0-NEXT: addi a0, sp, 16
; SPILL-O0-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O0-NEXT: #APP
; SPILL-O0-NEXT: #NO_APP
; SPILL-O0-NEXT: addi a0, sp, 16
; SPILL-O0-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O0-NEXT: csrr a0, vlenb
; SPILL-O0-NEXT: add sp, sp, a0
; SPILL-O0-NEXT: addi sp, sp, 16
; SPILL-O0-NEXT: ret
;
; SPILL-O2-LABEL: spill_zvlsseg_nxv2i32:
; SPILL-O2: # %bb.0: # %entry
; SPILL-O2-NEXT: addi sp, sp, -16
; SPILL-O2-NEXT: csrr a2, vlenb
; SPILL-O2-NEXT: slli a2, a2, 1
; SPILL-O2-NEXT: sub sp, sp, a2
; SPILL-O2-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; SPILL-O2-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: addi sp, sp, 16
; SPILL-O2-NEXT: ret
;
; SPILL-O2-VLEN128-LABEL: spill_zvlsseg_nxv2i32:
; SPILL-O2-VLEN128: # %bb.0: # %entry
; SPILL-O2-VLEN128-NEXT: addi sp, sp, -16
; SPILL-O2-VLEN128-NEXT: addi sp, sp, -32
; SPILL-O2-VLEN128-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; SPILL-O2-VLEN128-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 16
; SPILL-O2-VLEN128-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vs1r.v v9, (a0) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 16
; SPILL-O2-VLEN128-NEXT: vl1r.v v7, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 32
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
entry:
  %0 = tail call target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) undef, ptr %base, i64 %vl, i64 5)
  call void asm sideeffect "",
  "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %1 = call <vscale x 2 x i32> @llvm.riscv.tuple.extract.nxv2i32.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2) %0, i32 1)
  ret <vscale x 2 x i32> %1
}
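
; Here each field is an LMUL=2 register group (v8m2/v10m2), spilled with
; vs2r.v and reloaded with vl2r.v; the -O2 frame reserves 4*vlenb bytes.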
define <vscale x 4 x i32> @spill_zvlsseg_nxv4i32(ptr %base, i64 %vl) nounwind {
; SPILL-O0-LABEL: spill_zvlsseg_nxv4i32:
; SPILL-O0: # %bb.0: # %entry
; SPILL-O0-NEXT: addi sp, sp, -16
; SPILL-O0-NEXT: csrr a2, vlenb
; SPILL-O0-NEXT: slli a2, a2, 1
; SPILL-O0-NEXT: sub sp, sp, a2
; SPILL-O0-NEXT: # implicit-def: $v8m2_v10m2
; SPILL-O0-NEXT: vsetvli zero, a1, e32, m2, tu, ma
; SPILL-O0-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O0-NEXT: vsetvli zero, a1, e32, m2, tu, ma
; SPILL-O0-NEXT: vmv2r.v v8, v10
; SPILL-O0-NEXT: addi a0, sp, 16
; SPILL-O0-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O0-NEXT: #APP
; SPILL-O0-NEXT: #NO_APP
; SPILL-O0-NEXT: addi a0, sp, 16
; SPILL-O0-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O0-NEXT: csrr a0, vlenb
; SPILL-O0-NEXT: slli a0, a0, 1
; SPILL-O0-NEXT: add sp, sp, a0
; SPILL-O0-NEXT: addi sp, sp, 16
; SPILL-O0-NEXT: ret
;
; SPILL-O2-LABEL: spill_zvlsseg_nxv4i32:
; SPILL-O2: # %bb.0: # %entry
; SPILL-O2-NEXT: addi sp, sp, -16
; SPILL-O2-NEXT: csrr a2, vlenb
; SPILL-O2-NEXT: slli a2, a2, 2
; SPILL-O2-NEXT: sub sp, sp, a2
; SPILL-O2-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; SPILL-O2-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 1
; SPILL-O2-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 1
; SPILL-O2-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 2
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: addi sp, sp, 16
; SPILL-O2-NEXT: ret
;
; SPILL-O2-VLEN128-LABEL: spill_zvlsseg_nxv4i32:
; SPILL-O2-VLEN128: # %bb.0: # %entry
; SPILL-O2-VLEN128-NEXT: addi sp, sp, -16
; SPILL-O2-VLEN128-NEXT: addi sp, sp, -64
; SPILL-O2-VLEN128-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; SPILL-O2-VLEN128-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 32
; SPILL-O2-VLEN128-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 32
; SPILL-O2-VLEN128-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 64
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
entry:
  %0 = tail call target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) undef, ptr %base, i64 %vl, i64 5)
  call void asm sideeffect "",
  "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %1 = call <vscale x 4 x i32> @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2) %0, i32 1)
  ret <vscale x 4 x i32> %1
}
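
; Fields are LMUL=4 register groups (v8m4/v12m4), spilled with vs4r.v and
; reloaded with vl4r.v; the -O2 frame reserves 8*vlenb bytes.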
define <vscale x 8 x i32> @spill_zvlsseg_nxv8i32(ptr %base, i64 %vl) nounwind {
; SPILL-O0-LABEL: spill_zvlsseg_nxv8i32:
; SPILL-O0: # %bb.0: # %entry
; SPILL-O0-NEXT: addi sp, sp, -16
; SPILL-O0-NEXT: csrr a2, vlenb
; SPILL-O0-NEXT: slli a2, a2, 2
; SPILL-O0-NEXT: sub sp, sp, a2
; SPILL-O0-NEXT: # implicit-def: $v8m4_v12m4
; SPILL-O0-NEXT: vsetvli zero, a1, e32, m4, tu, ma
; SPILL-O0-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O0-NEXT: vsetvli zero, a1, e32, m4, tu, ma
; SPILL-O0-NEXT: vmv4r.v v8, v12
; SPILL-O0-NEXT: addi a0, sp, 16
; SPILL-O0-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O0-NEXT: #APP
; SPILL-O0-NEXT: #NO_APP
; SPILL-O0-NEXT: addi a0, sp, 16
; SPILL-O0-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O0-NEXT: csrr a0, vlenb
; SPILL-O0-NEXT: slli a0, a0, 2
; SPILL-O0-NEXT: add sp, sp, a0
; SPILL-O0-NEXT: addi sp, sp, 16
; SPILL-O0-NEXT: ret
;
; SPILL-O2-LABEL: spill_zvlsseg_nxv8i32:
; SPILL-O2: # %bb.0: # %entry
; SPILL-O2-NEXT: addi sp, sp, -16
; SPILL-O2-NEXT: csrr a2, vlenb
; SPILL-O2-NEXT: slli a2, a2, 3
; SPILL-O2-NEXT: sub sp, sp, a2
; SPILL-O2-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; SPILL-O2-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 2
; SPILL-O2-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vs4r.v v12, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 2
; SPILL-O2-NEXT: vl4r.v v4, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 3
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: addi sp, sp, 16
; SPILL-O2-NEXT: ret
;
; SPILL-O2-VLEN128-LABEL: spill_zvlsseg_nxv8i32:
; SPILL-O2-VLEN128: # %bb.0: # %entry
; SPILL-O2-VLEN128-NEXT: addi sp, sp, -16
; SPILL-O2-VLEN128-NEXT: addi sp, sp, -128
; SPILL-O2-VLEN128-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; SPILL-O2-VLEN128-NEXT: vlseg2e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 64
; SPILL-O2-VLEN128-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vs4r.v v12, (a0) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 64
; SPILL-O2-VLEN128-NEXT: vl4r.v v4, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 128
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
entry:
  %0 = tail call target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) undef, ptr %base, i64 %vl, i64 5)
  call void asm sideeffect "",
  "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %1 = call <vscale x 8 x i32> @llvm.riscv.tuple.extract.nxv8i32.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2) %0, i32 1)
  ret <vscale x 8 x i32> %1
}
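
; A three-field tuple at LMUL=2: the -O2 frame is 6*vlenb bytes, computed
; with li/mul because 6 is not a power of two.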
define <vscale x 4 x i32> @spill_zvlsseg3_nxv4i32(ptr %base, i64 %vl) nounwind {
; SPILL-O0-LABEL: spill_zvlsseg3_nxv4i32:
; SPILL-O0: # %bb.0: # %entry
; SPILL-O0-NEXT: addi sp, sp, -16
; SPILL-O0-NEXT: csrr a2, vlenb
; SPILL-O0-NEXT: slli a2, a2, 1
; SPILL-O0-NEXT: sub sp, sp, a2
; SPILL-O0-NEXT: # implicit-def: $v8m2_v10m2_v12m2
; SPILL-O0-NEXT: vsetvli zero, a1, e32, m2, tu, ma
; SPILL-O0-NEXT: vlseg3e32.v v8, (a0)
; SPILL-O0-NEXT: vsetvli zero, a1, e32, m2, tu, ma
; SPILL-O0-NEXT: vmv2r.v v8, v10
; SPILL-O0-NEXT: addi a0, sp, 16
; SPILL-O0-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O0-NEXT: #APP
; SPILL-O0-NEXT: #NO_APP
; SPILL-O0-NEXT: addi a0, sp, 16
; SPILL-O0-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O0-NEXT: csrr a0, vlenb
; SPILL-O0-NEXT: slli a0, a0, 1
; SPILL-O0-NEXT: add sp, sp, a0
; SPILL-O0-NEXT: addi sp, sp, 16
; SPILL-O0-NEXT: ret
;
; SPILL-O2-LABEL: spill_zvlsseg3_nxv4i32:
; SPILL-O2: # %bb.0: # %entry
; SPILL-O2-NEXT: addi sp, sp, -16
; SPILL-O2-NEXT: csrr a2, vlenb
; SPILL-O2-NEXT: li a3, 6
; SPILL-O2-NEXT: mul a2, a2, a3
; SPILL-O2-NEXT: sub sp, sp, a2
; SPILL-O2-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; SPILL-O2-NEXT: vlseg3e32.v v8, (a0)
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 1
; SPILL-O2-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vs2r.v v12, (a0) # Unknown-size Folded Spill
; SPILL-O2-NEXT: #APP
; SPILL-O2-NEXT: #NO_APP
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: csrr a1, vlenb
; SPILL-O2-NEXT: slli a1, a1, 1
; SPILL-O2-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: add a0, a0, a1
; SPILL-O2-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: li a1, 6
; SPILL-O2-NEXT: mul a0, a0, a1
; SPILL-O2-NEXT: add sp, sp, a0
; SPILL-O2-NEXT: addi sp, sp, 16
; SPILL-O2-NEXT: ret
;
; SPILL-O2-VLEN128-LABEL: spill_zvlsseg3_nxv4i32:
; SPILL-O2-VLEN128: # %bb.0: # %entry
; SPILL-O2-VLEN128-NEXT: addi sp, sp, -16
; SPILL-O2-VLEN128-NEXT: addi sp, sp, -96
; SPILL-O2-VLEN128-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; SPILL-O2-VLEN128-NEXT: vlseg3e32.v v8, (a0)
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 32
; SPILL-O2-VLEN128-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vs2r.v v12, (a0) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: #APP
; SPILL-O2-VLEN128-NEXT: #NO_APP
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: li a1, 32
; SPILL-O2-VLEN128-NEXT: vl2r.v v6, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: add a0, a0, a1
; SPILL-O2-VLEN128-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 96
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 16
; SPILL-O2-VLEN128-NEXT: ret
entry:
  %0 = tail call target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) undef, ptr %base, i64 %vl, i64 5)
  call void asm sideeffect "",
  "~{v0},~{v1},~{v2},~{v3},~{v4},~{v5},~{v6},~{v7},~{v8},~{v9},~{v10},~{v11},~{v12},~{v13},~{v14},~{v15},~{v16},~{v17},~{v18},~{v19},~{v20},~{v21},~{v22},~{v23},~{v24},~{v25},~{v26},~{v27},~{v28},~{v29},~{v30},~{v31}"()
  %1 = call <vscale x 4 x i32> @llvm.riscv.tuple.extract.nxv4i32.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3) %0, i32 1)
  ret <vscale x 4 x i32> %1
}
declare target("riscv.vector.tuple", <vscale x 4 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv4i8_2t(target("riscv.vector.tuple", <vscale x 4 x i8>, 2), ptr, i64, i64)
declare target("riscv.vector.tuple", <vscale x 8 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv8i8_2t(target("riscv.vector.tuple", <vscale x 8 x i8>, 2), ptr, i64, i64)
declare target("riscv.vector.tuple", <vscale x 16 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv16i8_2t(target("riscv.vector.tuple", <vscale x 16 x i8>, 2), ptr, i64, i64)
declare target("riscv.vector.tuple", <vscale x 32 x i8>, 2) @llvm.riscv.vlseg2.triscv.vector.tuple_nxv32i8_2t(target("riscv.vector.tuple", <vscale x 32 x i8>, 2), ptr, i64, i64)
declare target("riscv.vector.tuple", <vscale x 16 x i8>, 3) @llvm.riscv.vlseg3.triscv.vector.tuple_nxv16i8_3t(target("riscv.vector.tuple", <vscale x 16 x i8>, 3), ptr, i64, i64)