; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc --mtriple=loongarch32 --verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=LA32
; RUN: llc --mtriple=loongarch64 --verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefix=LA64

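;; Check that stack objects with alignments from 32 up to 4096 bytes force the
;; stack pointer to be realigned, and that the "no-realign-stack" function
;; attribute disables that realignment.
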
declare void @callee(ptr)

define void @caller32() {
; LA32-LABEL: caller32:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -32
; LA32-NEXT:    .cfi_def_cfa_offset 32
; LA32-NEXT:    st.w $ra, $sp, 28 # 4-byte Folded Spill
; LA32-NEXT:    st.w $fp, $sp, 24 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    .cfi_offset 22, -8
; LA32-NEXT:    addi.w $fp, $sp, 32
; LA32-NEXT:    .cfi_def_cfa 22, 0
; LA32-NEXT:    bstrins.w $sp, $zero, 4, 0
; LA32-NEXT:    addi.w $a0, $sp, 0
; LA32-NEXT:    bl %plt(callee)
; LA32-NEXT:    addi.w $sp, $fp, -32
; LA32-NEXT:    ld.w $fp, $sp, 24 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $ra, $sp, 28 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 32
; LA32-NEXT:    ret
;
; LA64-LABEL: caller32:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -32
; LA64-NEXT:    .cfi_def_cfa_offset 32
; LA64-NEXT:    st.d $ra, $sp, 24 # 8-byte Folded Spill
; LA64-NEXT:    st.d $fp, $sp, 16 # 8-byte Folded Spill
; LA64-NEXT:    .cfi_offset 1, -8
; LA64-NEXT:    .cfi_offset 22, -16
; LA64-NEXT:    addi.d $fp, $sp, 32
; LA64-NEXT:    .cfi_def_cfa 22, 0
; LA64-NEXT:    bstrins.d $sp, $zero, 4, 0
; LA64-NEXT:    addi.d $a0, $sp, 0
; LA64-NEXT:    bl %plt(callee)
; LA64-NEXT:    addi.d $sp, $fp, -32
; LA64-NEXT:    ld.d $fp, $sp, 16 # 8-byte Folded Reload
; LA64-NEXT:    ld.d $ra, $sp, 24 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 32
; LA64-NEXT:    ret
  %1 = alloca i8, align 32
  call void @callee(ptr %1)
  ret void
}

define void @caller_no_realign32() "no-realign-stack" {
; LA32-LABEL: caller_no_realign32:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    addi.w $a0, $sp, 0
; LA32-NEXT:    bl %plt(callee)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: caller_no_realign32:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -16
; LA64-NEXT:    .cfi_def_cfa_offset 16
; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT:    .cfi_offset 1, -8
; LA64-NEXT:    addi.d $a0, $sp, 0
; LA64-NEXT:    bl %plt(callee)
; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 16
; LA64-NEXT:    ret
  %1 = alloca i8, align 32
  call void @callee(ptr %1)
  ret void
}

define void @caller64() {
; LA32-LABEL: caller64:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -64
; LA32-NEXT:    .cfi_def_cfa_offset 64
; LA32-NEXT:    st.w $ra, $sp, 60 # 4-byte Folded Spill
; LA32-NEXT:    st.w $fp, $sp, 56 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    .cfi_offset 22, -8
; LA32-NEXT:    addi.w $fp, $sp, 64
; LA32-NEXT:    .cfi_def_cfa 22, 0
; LA32-NEXT:    bstrins.w $sp, $zero, 5, 0
; LA32-NEXT:    addi.w $a0, $sp, 0
; LA32-NEXT:    bl %plt(callee)
; LA32-NEXT:    addi.w $sp, $fp, -64
; LA32-NEXT:    ld.w $fp, $sp, 56 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $ra, $sp, 60 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 64
; LA32-NEXT:    ret
;
; LA64-LABEL: caller64:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -64
; LA64-NEXT:    .cfi_def_cfa_offset 64
; LA64-NEXT:    st.d $ra, $sp, 56 # 8-byte Folded Spill
; LA64-NEXT:    st.d $fp, $sp, 48 # 8-byte Folded Spill
; LA64-NEXT:    .cfi_offset 1, -8
; LA64-NEXT:    .cfi_offset 22, -16
; LA64-NEXT:    addi.d $fp, $sp, 64
; LA64-NEXT:    .cfi_def_cfa 22, 0
; LA64-NEXT:    bstrins.d $sp, $zero, 5, 0
; LA64-NEXT:    addi.d $a0, $sp, 0
; LA64-NEXT:    bl %plt(callee)
; LA64-NEXT:    addi.d $sp, $fp, -64
; LA64-NEXT:    ld.d $fp, $sp, 48 # 8-byte Folded Reload
; LA64-NEXT:    ld.d $ra, $sp, 56 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 64
; LA64-NEXT:    ret
  %1 = alloca i8, align 64
  call void @callee(ptr %1)
  ret void
}

define void @caller_no_realign64() "no-realign-stack" {
; LA32-LABEL: caller_no_realign64:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    addi.w $a0, $sp, 0
; LA32-NEXT:    bl %plt(callee)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: caller_no_realign64:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -16
; LA64-NEXT:    .cfi_def_cfa_offset 16
; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT:    .cfi_offset 1, -8
; LA64-NEXT:    addi.d $a0, $sp, 0
; LA64-NEXT:    bl %plt(callee)
; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 16
; LA64-NEXT:    ret
  %1 = alloca i8, align 64
  call void @callee(ptr %1)
  ret void
}

define void @caller128() {
; LA32-LABEL: caller128:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -128
; LA32-NEXT:    .cfi_def_cfa_offset 128
; LA32-NEXT:    st.w $ra, $sp, 124 # 4-byte Folded Spill
; LA32-NEXT:    st.w $fp, $sp, 120 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    .cfi_offset 22, -8
; LA32-NEXT:    addi.w $fp, $sp, 128
; LA32-NEXT:    .cfi_def_cfa 22, 0
; LA32-NEXT:    bstrins.w $sp, $zero, 6, 0
; LA32-NEXT:    addi.w $a0, $sp, 0
; LA32-NEXT:    bl %plt(callee)
; LA32-NEXT:    addi.w $sp, $fp, -128
; LA32-NEXT:    ld.w $fp, $sp, 120 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $ra, $sp, 124 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 128
; LA32-NEXT:    ret
;
; LA64-LABEL: caller128:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -128
; LA64-NEXT:    .cfi_def_cfa_offset 128
; LA64-NEXT:    st.d $ra, $sp, 120 # 8-byte Folded Spill
; LA64-NEXT:    st.d $fp, $sp, 112 # 8-byte Folded Spill
; LA64-NEXT:    .cfi_offset 1, -8
; LA64-NEXT:    .cfi_offset 22, -16
; LA64-NEXT:    addi.d $fp, $sp, 128
; LA64-NEXT:    .cfi_def_cfa 22, 0
; LA64-NEXT:    bstrins.d $sp, $zero, 6, 0
; LA64-NEXT:    addi.d $a0, $sp, 0
; LA64-NEXT:    bl %plt(callee)
; LA64-NEXT:    addi.d $sp, $fp, -128
; LA64-NEXT:    ld.d $fp, $sp, 112 # 8-byte Folded Reload
; LA64-NEXT:    ld.d $ra, $sp, 120 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 128
; LA64-NEXT:    ret
  %1 = alloca i8, align 128
  call void @callee(ptr %1)
  ret void
}

define void @caller_no_realign128() "no-realign-stack" {
; LA32-LABEL: caller_no_realign128:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    addi.w $a0, $sp, 0
; LA32-NEXT:    bl %plt(callee)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: caller_no_realign128:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -16
; LA64-NEXT:    .cfi_def_cfa_offset 16
; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT:    .cfi_offset 1, -8
; LA64-NEXT:    addi.d $a0, $sp, 0
; LA64-NEXT:    bl %plt(callee)
; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 16
; LA64-NEXT:    ret
  %1 = alloca i8, align 128
  call void @callee(ptr %1)
  ret void
}

define void @caller256() {
; LA32-LABEL: caller256:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -256
; LA32-NEXT:    .cfi_def_cfa_offset 256
; LA32-NEXT:    st.w $ra, $sp, 252 # 4-byte Folded Spill
; LA32-NEXT:    st.w $fp, $sp, 248 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    .cfi_offset 22, -8
; LA32-NEXT:    addi.w $fp, $sp, 256
; LA32-NEXT:    .cfi_def_cfa 22, 0
; LA32-NEXT:    bstrins.w $sp, $zero, 7, 0
; LA32-NEXT:    addi.w $a0, $sp, 0
; LA32-NEXT:    bl %plt(callee)
; LA32-NEXT:    addi.w $sp, $fp, -256
; LA32-NEXT:    ld.w $fp, $sp, 248 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $ra, $sp, 252 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 256
; LA32-NEXT:    ret
;
; LA64-LABEL: caller256:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -256
; LA64-NEXT:    .cfi_def_cfa_offset 256
; LA64-NEXT:    st.d $ra, $sp, 248 # 8-byte Folded Spill
; LA64-NEXT:    st.d $fp, $sp, 240 # 8-byte Folded Spill
; LA64-NEXT:    .cfi_offset 1, -8
; LA64-NEXT:    .cfi_offset 22, -16
; LA64-NEXT:    addi.d $fp, $sp, 256
; LA64-NEXT:    .cfi_def_cfa 22, 0
; LA64-NEXT:    bstrins.d $sp, $zero, 7, 0
; LA64-NEXT:    addi.d $a0, $sp, 0
; LA64-NEXT:    bl %plt(callee)
; LA64-NEXT:    addi.d $sp, $fp, -256
; LA64-NEXT:    ld.d $fp, $sp, 240 # 8-byte Folded Reload
; LA64-NEXT:    ld.d $ra, $sp, 248 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 256
; LA64-NEXT:    ret
  %1 = alloca i8, align 256
  call void @callee(ptr %1)
  ret void
}

define void @caller_no_realign256() "no-realign-stack" {
; LA32-LABEL: caller_no_realign256:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    addi.w $a0, $sp, 0
; LA32-NEXT:    bl %plt(callee)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: caller_no_realign256:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -16
; LA64-NEXT:    .cfi_def_cfa_offset 16
; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT:    .cfi_offset 1, -8
; LA64-NEXT:    addi.d $a0, $sp, 0
; LA64-NEXT:    bl %plt(callee)
; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 16
; LA64-NEXT:    ret
  %1 = alloca i8, align 256
  call void @callee(ptr %1)
  ret void
}

define void @caller512() {
; LA32-LABEL: caller512:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -1024
; LA32-NEXT:    .cfi_def_cfa_offset 1024
; LA32-NEXT:    st.w $ra, $sp, 1020 # 4-byte Folded Spill
; LA32-NEXT:    st.w $fp, $sp, 1016 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    .cfi_offset 22, -8
; LA32-NEXT:    addi.w $fp, $sp, 1024
; LA32-NEXT:    .cfi_def_cfa 22, 0
; LA32-NEXT:    bstrins.w $sp, $zero, 8, 0
; LA32-NEXT:    addi.w $a0, $sp, 512
; LA32-NEXT:    bl %plt(callee)
; LA32-NEXT:    addi.w $sp, $fp, -1024
; LA32-NEXT:    ld.w $fp, $sp, 1016 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $ra, $sp, 1020 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 1024
; LA32-NEXT:    ret
;
; LA64-LABEL: caller512:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -1024
; LA64-NEXT:    .cfi_def_cfa_offset 1024
; LA64-NEXT:    st.d $ra, $sp, 1016 # 8-byte Folded Spill
; LA64-NEXT:    st.d $fp, $sp, 1008 # 8-byte Folded Spill
; LA64-NEXT:    .cfi_offset 1, -8
; LA64-NEXT:    .cfi_offset 22, -16
; LA64-NEXT:    addi.d $fp, $sp, 1024
; LA64-NEXT:    .cfi_def_cfa 22, 0
; LA64-NEXT:    bstrins.d $sp, $zero, 8, 0
; LA64-NEXT:    addi.d $a0, $sp, 512
; LA64-NEXT:    bl %plt(callee)
; LA64-NEXT:    addi.d $sp, $fp, -1024
; LA64-NEXT:    ld.d $fp, $sp, 1008 # 8-byte Folded Reload
; LA64-NEXT:    ld.d $ra, $sp, 1016 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 1024
; LA64-NEXT:    ret
  %1 = alloca i8, align 512
  call void @callee(ptr %1)
  ret void
}

define void @caller_no_realign512() "no-realign-stack" {
; LA32-LABEL: caller_no_realign512:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    addi.w $a0, $sp, 0
; LA32-NEXT:    bl %plt(callee)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: caller_no_realign512:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -16
; LA64-NEXT:    .cfi_def_cfa_offset 16
; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT:    .cfi_offset 1, -8
; LA64-NEXT:    addi.d $a0, $sp, 0
; LA64-NEXT:    bl %plt(callee)
; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 16
; LA64-NEXT:    ret
  %1 = alloca i8, align 512
  call void @callee(ptr %1)
  ret void
}

define void @caller1024() {
; LA32-LABEL: caller1024:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -2032
; LA32-NEXT:    .cfi_def_cfa_offset 2032
; LA32-NEXT:    st.w $ra, $sp, 2028 # 4-byte Folded Spill
; LA32-NEXT:    st.w $fp, $sp, 2024 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    .cfi_offset 22, -8
; LA32-NEXT:    addi.w $fp, $sp, 2032
; LA32-NEXT:    .cfi_def_cfa 22, 0
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    bstrins.w $sp, $zero, 9, 0
; LA32-NEXT:    addi.w $a0, $sp, 1024
; LA32-NEXT:    bl %plt(callee)
; LA32-NEXT:    addi.w $sp, $fp, -2048
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ld.w $fp, $sp, 2024 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $ra, $sp, 2028 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 2032
; LA32-NEXT:    ret
;
; LA64-LABEL: caller1024:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -2032
; LA64-NEXT:    .cfi_def_cfa_offset 2032
; LA64-NEXT:    st.d $ra, $sp, 2024 # 8-byte Folded Spill
; LA64-NEXT:    st.d $fp, $sp, 2016 # 8-byte Folded Spill
; LA64-NEXT:    .cfi_offset 1, -8
; LA64-NEXT:    .cfi_offset 22, -16
; LA64-NEXT:    addi.d $fp, $sp, 2032
; LA64-NEXT:    .cfi_def_cfa 22, 0
; LA64-NEXT:    addi.d $sp, $sp, -16
; LA64-NEXT:    bstrins.d $sp, $zero, 9, 0
; LA64-NEXT:    addi.d $a0, $sp, 1024
; LA64-NEXT:    bl %plt(callee)
; LA64-NEXT:    addi.d $sp, $fp, -2048
; LA64-NEXT:    addi.d $sp, $sp, 16
; LA64-NEXT:    ld.d $fp, $sp, 2016 # 8-byte Folded Reload
; LA64-NEXT:    ld.d $ra, $sp, 2024 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 2032
; LA64-NEXT:    ret
  %1 = alloca i8, align 1024
  call void @callee(ptr %1)
  ret void
}

define void @caller_no_realign1024() "no-realign-stack" {
; LA32-LABEL: caller_no_realign1024:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    addi.w $a0, $sp, 0
; LA32-NEXT:    bl %plt(callee)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: caller_no_realign1024:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -16
; LA64-NEXT:    .cfi_def_cfa_offset 16
; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT:    .cfi_offset 1, -8
; LA64-NEXT:    addi.d $a0, $sp, 0
; LA64-NEXT:    bl %plt(callee)
; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 16
; LA64-NEXT:    ret
  %1 = alloca i8, align 1024
  call void @callee(ptr %1)
  ret void
}

define void @caller2048() {
; LA32-LABEL: caller2048:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -2032
; LA32-NEXT:    .cfi_def_cfa_offset 2032
; LA32-NEXT:    st.w $ra, $sp, 2028 # 4-byte Folded Spill
; LA32-NEXT:    st.w $fp, $sp, 2024 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    .cfi_offset 22, -8
; LA32-NEXT:    addi.w $fp, $sp, 2032
; LA32-NEXT:    .cfi_def_cfa 22, 0
; LA32-NEXT:    addi.w $sp, $sp, -2048
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    bstrins.w $sp, $zero, 10, 0
; LA32-NEXT:    ori $a0, $zero, 2048
; LA32-NEXT:    add.w $a0, $sp, $a0
; LA32-NEXT:    bl %plt(callee)
; LA32-NEXT:    lu12i.w $a0, 1
; LA32-NEXT:    sub.w $sp, $fp, $a0
; LA32-NEXT:    addi.w $sp, $sp, 2032
; LA32-NEXT:    addi.w $sp, $sp, 32
; LA32-NEXT:    ld.w $fp, $sp, 2024 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $ra, $sp, 2028 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 2032
; LA32-NEXT:    ret
;
; LA64-LABEL: caller2048:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -2032
; LA64-NEXT:    .cfi_def_cfa_offset 2032
; LA64-NEXT:    st.d $ra, $sp, 2024 # 8-byte Folded Spill
; LA64-NEXT:    st.d $fp, $sp, 2016 # 8-byte Folded Spill
; LA64-NEXT:    .cfi_offset 1, -8
; LA64-NEXT:    .cfi_offset 22, -16
; LA64-NEXT:    addi.d $fp, $sp, 2032
; LA64-NEXT:    .cfi_def_cfa 22, 0
; LA64-NEXT:    addi.d $sp, $sp, -2048
; LA64-NEXT:    addi.d $sp, $sp, -16
; LA64-NEXT:    bstrins.d $sp, $zero, 10, 0
; LA64-NEXT:    ori $a0, $zero, 2048
; LA64-NEXT:    add.d $a0, $sp, $a0
; LA64-NEXT:    bl %plt(callee)
; LA64-NEXT:    lu12i.w $a0, 1
; LA64-NEXT:    sub.d $sp, $fp, $a0
; LA64-NEXT:    addi.d $sp, $sp, 2032
; LA64-NEXT:    addi.d $sp, $sp, 32
; LA64-NEXT:    ld.d $fp, $sp, 2016 # 8-byte Folded Reload
; LA64-NEXT:    ld.d $ra, $sp, 2024 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 2032
; LA64-NEXT:    ret
  %1 = alloca i8, align 2048
  call void @callee(ptr %1)
  ret void
}

define void @caller_no_realign2048() "no-realign-stack" {
; LA32-LABEL: caller_no_realign2048:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    addi.w $a0, $sp, 0
; LA32-NEXT:    bl %plt(callee)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: caller_no_realign2048:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -16
; LA64-NEXT:    .cfi_def_cfa_offset 16
; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT:    .cfi_offset 1, -8
; LA64-NEXT:    addi.d $a0, $sp, 0
; LA64-NEXT:    bl %plt(callee)
; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 16
; LA64-NEXT:    ret
  %1 = alloca i8, align 2048
  call void @callee(ptr %1)
  ret void
}

define void @caller4096() {
; LA32-LABEL: caller4096:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -2032
; LA32-NEXT:    .cfi_def_cfa_offset 2032
; LA32-NEXT:    st.w $ra, $sp, 2028 # 4-byte Folded Spill
; LA32-NEXT:    st.w $fp, $sp, 2024 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    .cfi_offset 22, -8
; LA32-NEXT:    addi.w $fp, $sp, 2032
; LA32-NEXT:    .cfi_def_cfa 22, 0
; LA32-NEXT:    lu12i.w $a0, 1
; LA32-NEXT:    ori $a0, $a0, 2064
; LA32-NEXT:    sub.w $sp, $sp, $a0
; LA32-NEXT:    bstrins.w $sp, $zero, 11, 0
; LA32-NEXT:    lu12i.w $a0, 1
; LA32-NEXT:    add.w $a0, $sp, $a0
; LA32-NEXT:    bl %plt(callee)
; LA32-NEXT:    lu12i.w $a0, 2
; LA32-NEXT:    sub.w $sp, $fp, $a0
; LA32-NEXT:    lu12i.w $a0, 1
; LA32-NEXT:    ori $a0, $a0, 2064
; LA32-NEXT:    add.w $sp, $sp, $a0
; LA32-NEXT:    ld.w $fp, $sp, 2024 # 4-byte Folded Reload
; LA32-NEXT:    ld.w $ra, $sp, 2028 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 2032
; LA32-NEXT:    ret
;
; LA64-LABEL: caller4096:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -2032
; LA64-NEXT:    .cfi_def_cfa_offset 2032
; LA64-NEXT:    st.d $ra, $sp, 2024 # 8-byte Folded Spill
; LA64-NEXT:    st.d $fp, $sp, 2016 # 8-byte Folded Spill
; LA64-NEXT:    .cfi_offset 1, -8
; LA64-NEXT:    .cfi_offset 22, -16
; LA64-NEXT:    addi.d $fp, $sp, 2032
; LA64-NEXT:    .cfi_def_cfa 22, 0
; LA64-NEXT:    lu12i.w $a0, 1
; LA64-NEXT:    ori $a0, $a0, 2064
; LA64-NEXT:    sub.d $sp, $sp, $a0
; LA64-NEXT:    bstrins.d $sp, $zero, 11, 0
; LA64-NEXT:    lu12i.w $a0, 1
; LA64-NEXT:    add.d $a0, $sp, $a0
; LA64-NEXT:    bl %plt(callee)
; LA64-NEXT:    lu12i.w $a0, 2
; LA64-NEXT:    sub.d $sp, $fp, $a0
; LA64-NEXT:    lu12i.w $a0, 1
; LA64-NEXT:    ori $a0, $a0, 2064
; LA64-NEXT:    add.d $sp, $sp, $a0
; LA64-NEXT:    ld.d $fp, $sp, 2016 # 8-byte Folded Reload
; LA64-NEXT:    ld.d $ra, $sp, 2024 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 2032
; LA64-NEXT:    ret
  %1 = alloca i8, align 4096
  call void @callee(ptr %1)
  ret void
}

define void @caller_no_realign4096() "no-realign-stack" {
; LA32-LABEL: caller_no_realign4096:
; LA32:       # %bb.0:
; LA32-NEXT:    addi.w $sp, $sp, -16
; LA32-NEXT:    .cfi_def_cfa_offset 16
; LA32-NEXT:    st.w $ra, $sp, 12 # 4-byte Folded Spill
; LA32-NEXT:    .cfi_offset 1, -4
; LA32-NEXT:    addi.w $a0, $sp, 0
; LA32-NEXT:    bl %plt(callee)
; LA32-NEXT:    ld.w $ra, $sp, 12 # 4-byte Folded Reload
; LA32-NEXT:    addi.w $sp, $sp, 16
; LA32-NEXT:    ret
;
; LA64-LABEL: caller_no_realign4096:
; LA64:       # %bb.0:
; LA64-NEXT:    addi.d $sp, $sp, -16
; LA64-NEXT:    .cfi_def_cfa_offset 16
; LA64-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LA64-NEXT:    .cfi_offset 1, -8
; LA64-NEXT:    addi.d $a0, $sp, 0
; LA64-NEXT:    bl %plt(callee)
; LA64-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LA64-NEXT:    addi.d $sp, $sp, 16
; LA64-NEXT:    ret
  %1 = alloca i8, align 4096
  call void @callee(ptr %1)
  ret void
}