1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
2 ; RUN: llc -mtriple=riscv32 -mattr=+zcmp -verify-machineinstrs < %s \
3 ; RUN: | FileCheck %s -check-prefixes=RV32IZCMP
4 ; RUN: llc -mtriple=riscv64 -mattr=+zcmp -verify-machineinstrs < %s \
5 ; RUN: | FileCheck %s -check-prefixes=RV64IZCMP
6 ; RUN: llc -mtriple=riscv32 -mattr=+zcmp,+save-restore \
7 ; RUN: -verify-machineinstrs < %s | FileCheck %s -check-prefixes=RV32IZCMP-SR
8 ; RUN: llc -mtriple=riscv64 -mattr=+zcmp,+save-restore \
9 ; RUN: -verify-machineinstrs < %s | FileCheck %s -check-prefixes=RV64IZCMP-SR
10 ; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
11 ; RUN: | FileCheck -check-prefixes=RV32I %s
12 ; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
13 ; RUN: | FileCheck -check-prefixes=RV64I %s
15 declare void @test(ptr)
16 declare void @callee_void(ptr)
17 declare i32 @callee(ptr)
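; Check that a frame too large for the cm.push immediate is handled by pairing
; cm.push/cm.popretz with a separate sp adjustment.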
define i32 @foo() {
20 ; RV32IZCMP-LABEL: foo:
22 ; RV32IZCMP-NEXT: cm.push {ra}, -64
23 ; RV32IZCMP-NEXT: addi sp, sp, -464
24 ; RV32IZCMP-NEXT: .cfi_def_cfa_offset 528
25 ; RV32IZCMP-NEXT: .cfi_offset ra, -4
26 ; RV32IZCMP-NEXT: mv a0, sp
27 ; RV32IZCMP-NEXT: call test
28 ; RV32IZCMP-NEXT: addi sp, sp, 464
29 ; RV32IZCMP-NEXT: cm.popretz {ra}, 64
31 ; RV64IZCMP-LABEL: foo:
33 ; RV64IZCMP-NEXT: cm.push {ra}, -64
34 ; RV64IZCMP-NEXT: addi sp, sp, -464
35 ; RV64IZCMP-NEXT: .cfi_def_cfa_offset 528
36 ; RV64IZCMP-NEXT: .cfi_offset ra, -8
37 ; RV64IZCMP-NEXT: mv a0, sp
38 ; RV64IZCMP-NEXT: call test
39 ; RV64IZCMP-NEXT: addi sp, sp, 464
40 ; RV64IZCMP-NEXT: cm.popretz {ra}, 64
42 ; RV32IZCMP-SR-LABEL: foo:
43 ; RV32IZCMP-SR: # %bb.0:
44 ; RV32IZCMP-SR-NEXT: cm.push {ra}, -64
45 ; RV32IZCMP-SR-NEXT: addi sp, sp, -464
46 ; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 528
47 ; RV32IZCMP-SR-NEXT: .cfi_offset ra, -4
48 ; RV32IZCMP-SR-NEXT: mv a0, sp
49 ; RV32IZCMP-SR-NEXT: call test
50 ; RV32IZCMP-SR-NEXT: addi sp, sp, 464
51 ; RV32IZCMP-SR-NEXT: cm.popretz {ra}, 64
53 ; RV64IZCMP-SR-LABEL: foo:
54 ; RV64IZCMP-SR: # %bb.0:
55 ; RV64IZCMP-SR-NEXT: cm.push {ra}, -64
56 ; RV64IZCMP-SR-NEXT: addi sp, sp, -464
57 ; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 528
58 ; RV64IZCMP-SR-NEXT: .cfi_offset ra, -8
59 ; RV64IZCMP-SR-NEXT: mv a0, sp
60 ; RV64IZCMP-SR-NEXT: call test
61 ; RV64IZCMP-SR-NEXT: addi sp, sp, 464
62 ; RV64IZCMP-SR-NEXT: cm.popretz {ra}, 64
66 ; RV32I-NEXT: addi sp, sp, -528
67 ; RV32I-NEXT: .cfi_def_cfa_offset 528
68 ; RV32I-NEXT: sw ra, 524(sp) # 4-byte Folded Spill
69 ; RV32I-NEXT: .cfi_offset ra, -4
70 ; RV32I-NEXT: addi a0, sp, 12
71 ; RV32I-NEXT: call test
72 ; RV32I-NEXT: li a0, 0
73 ; RV32I-NEXT: lw ra, 524(sp) # 4-byte Folded Reload
74 ; RV32I-NEXT: addi sp, sp, 528
79 ; RV64I-NEXT: addi sp, sp, -528
80 ; RV64I-NEXT: .cfi_def_cfa_offset 528
81 ; RV64I-NEXT: sd ra, 520(sp) # 8-byte Folded Spill
82 ; RV64I-NEXT: .cfi_offset ra, -8
83 ; RV64I-NEXT: addi a0, sp, 8
84 ; RV64I-NEXT: call test
85 ; RV64I-NEXT: li a0, 0
86 ; RV64I-NEXT: ld ra, 520(sp) # 8-byte Folded Reload
87 ; RV64I-NEXT: addi sp, sp, 528
89 %1 = alloca [512 x i8]
90 %2 = getelementptr [512 x i8], ptr %1, i32 0, i32 0
91 call void @test(ptr %2)
  ret i32 0
}
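; Check that cm.popretz is selected when the function returns zero, while
; cm.popret is used for other return values; the variable-sized alloca is
; released through the frame pointer before the pop.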
95 define i32 @pushpopret0(i32 signext %size) {
96 ; RV32IZCMP-LABEL: pushpopret0:
97 ; RV32IZCMP: # %bb.0: # %entry
98 ; RV32IZCMP-NEXT: cm.push {ra, s0}, -16
99 ; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16
100 ; RV32IZCMP-NEXT: .cfi_offset ra, -8
101 ; RV32IZCMP-NEXT: .cfi_offset s0, -4
102 ; RV32IZCMP-NEXT: addi s0, sp, 16
103 ; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0
104 ; RV32IZCMP-NEXT: addi a0, a0, 15
105 ; RV32IZCMP-NEXT: andi a0, a0, -16
106 ; RV32IZCMP-NEXT: sub a0, sp, a0
107 ; RV32IZCMP-NEXT: mv sp, a0
108 ; RV32IZCMP-NEXT: call callee_void
109 ; RV32IZCMP-NEXT: addi sp, s0, -16
110 ; RV32IZCMP-NEXT: cm.popretz {ra, s0}, 16
112 ; RV64IZCMP-LABEL: pushpopret0:
113 ; RV64IZCMP: # %bb.0: # %entry
114 ; RV64IZCMP-NEXT: cm.push {ra, s0}, -16
115 ; RV64IZCMP-NEXT: .cfi_def_cfa_offset 16
116 ; RV64IZCMP-NEXT: .cfi_offset ra, -16
117 ; RV64IZCMP-NEXT: .cfi_offset s0, -8
118 ; RV64IZCMP-NEXT: addi s0, sp, 16
119 ; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0
120 ; RV64IZCMP-NEXT: slli a0, a0, 32
121 ; RV64IZCMP-NEXT: srli a0, a0, 32
122 ; RV64IZCMP-NEXT: addi a0, a0, 15
123 ; RV64IZCMP-NEXT: andi a0, a0, -16
124 ; RV64IZCMP-NEXT: sub a0, sp, a0
125 ; RV64IZCMP-NEXT: mv sp, a0
126 ; RV64IZCMP-NEXT: call callee_void
127 ; RV64IZCMP-NEXT: addi sp, s0, -16
128 ; RV64IZCMP-NEXT: cm.popretz {ra, s0}, 16
130 ; RV32IZCMP-SR-LABEL: pushpopret0:
131 ; RV32IZCMP-SR: # %bb.0: # %entry
132 ; RV32IZCMP-SR-NEXT: cm.push {ra, s0}, -16
133 ; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
134 ; RV32IZCMP-SR-NEXT: .cfi_offset ra, -8
135 ; RV32IZCMP-SR-NEXT: .cfi_offset s0, -4
136 ; RV32IZCMP-SR-NEXT: addi s0, sp, 16
137 ; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
138 ; RV32IZCMP-SR-NEXT: addi a0, a0, 15
139 ; RV32IZCMP-SR-NEXT: andi a0, a0, -16
140 ; RV32IZCMP-SR-NEXT: sub a0, sp, a0
141 ; RV32IZCMP-SR-NEXT: mv sp, a0
142 ; RV32IZCMP-SR-NEXT: call callee_void
143 ; RV32IZCMP-SR-NEXT: addi sp, s0, -16
144 ; RV32IZCMP-SR-NEXT: cm.popretz {ra, s0}, 16
146 ; RV64IZCMP-SR-LABEL: pushpopret0:
147 ; RV64IZCMP-SR: # %bb.0: # %entry
148 ; RV64IZCMP-SR-NEXT: cm.push {ra, s0}, -16
149 ; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
150 ; RV64IZCMP-SR-NEXT: .cfi_offset ra, -16
151 ; RV64IZCMP-SR-NEXT: .cfi_offset s0, -8
152 ; RV64IZCMP-SR-NEXT: addi s0, sp, 16
153 ; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
154 ; RV64IZCMP-SR-NEXT: slli a0, a0, 32
155 ; RV64IZCMP-SR-NEXT: srli a0, a0, 32
156 ; RV64IZCMP-SR-NEXT: addi a0, a0, 15
157 ; RV64IZCMP-SR-NEXT: andi a0, a0, -16
158 ; RV64IZCMP-SR-NEXT: sub a0, sp, a0
159 ; RV64IZCMP-SR-NEXT: mv sp, a0
160 ; RV64IZCMP-SR-NEXT: call callee_void
161 ; RV64IZCMP-SR-NEXT: addi sp, s0, -16
162 ; RV64IZCMP-SR-NEXT: cm.popretz {ra, s0}, 16
164 ; RV32I-LABEL: pushpopret0:
165 ; RV32I: # %bb.0: # %entry
166 ; RV32I-NEXT: addi sp, sp, -16
167 ; RV32I-NEXT: .cfi_def_cfa_offset 16
168 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
169 ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
170 ; RV32I-NEXT: .cfi_offset ra, -4
171 ; RV32I-NEXT: .cfi_offset s0, -8
172 ; RV32I-NEXT: addi s0, sp, 16
173 ; RV32I-NEXT: .cfi_def_cfa s0, 0
174 ; RV32I-NEXT: addi a0, a0, 15
175 ; RV32I-NEXT: andi a0, a0, -16
176 ; RV32I-NEXT: sub a0, sp, a0
177 ; RV32I-NEXT: mv sp, a0
178 ; RV32I-NEXT: call callee_void
179 ; RV32I-NEXT: li a0, 0
180 ; RV32I-NEXT: addi sp, s0, -16
181 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
182 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
183 ; RV32I-NEXT: addi sp, sp, 16
186 ; RV64I-LABEL: pushpopret0:
187 ; RV64I: # %bb.0: # %entry
188 ; RV64I-NEXT: addi sp, sp, -16
189 ; RV64I-NEXT: .cfi_def_cfa_offset 16
190 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
191 ; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
192 ; RV64I-NEXT: .cfi_offset ra, -8
193 ; RV64I-NEXT: .cfi_offset s0, -16
194 ; RV64I-NEXT: addi s0, sp, 16
195 ; RV64I-NEXT: .cfi_def_cfa s0, 0
196 ; RV64I-NEXT: slli a0, a0, 32
197 ; RV64I-NEXT: srli a0, a0, 32
198 ; RV64I-NEXT: addi a0, a0, 15
199 ; RV64I-NEXT: andi a0, a0, -16
200 ; RV64I-NEXT: sub a0, sp, a0
201 ; RV64I-NEXT: mv sp, a0
202 ; RV64I-NEXT: call callee_void
203 ; RV64I-NEXT: li a0, 0
204 ; RV64I-NEXT: addi sp, s0, -16
205 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
206 ; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
207 ; RV64I-NEXT: addi sp, sp, 16
210 %0 = alloca i8, i32 %size, align 16
211 call void @callee_void(ptr nonnull %0)
  ret i32 0
}
215 define i32 @pushpopret1(i32 signext %size) {
216 ; RV32IZCMP-LABEL: pushpopret1:
217 ; RV32IZCMP: # %bb.0: # %entry
218 ; RV32IZCMP-NEXT: cm.push {ra, s0}, -16
219 ; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16
220 ; RV32IZCMP-NEXT: .cfi_offset ra, -8
221 ; RV32IZCMP-NEXT: .cfi_offset s0, -4
222 ; RV32IZCMP-NEXT: addi s0, sp, 16
223 ; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0
224 ; RV32IZCMP-NEXT: addi a0, a0, 15
225 ; RV32IZCMP-NEXT: andi a0, a0, -16
226 ; RV32IZCMP-NEXT: sub a0, sp, a0
227 ; RV32IZCMP-NEXT: mv sp, a0
228 ; RV32IZCMP-NEXT: call callee_void
229 ; RV32IZCMP-NEXT: li a0, 1
230 ; RV32IZCMP-NEXT: addi sp, s0, -16
231 ; RV32IZCMP-NEXT: cm.popret {ra, s0}, 16
233 ; RV64IZCMP-LABEL: pushpopret1:
234 ; RV64IZCMP: # %bb.0: # %entry
235 ; RV64IZCMP-NEXT: cm.push {ra, s0}, -16
236 ; RV64IZCMP-NEXT: .cfi_def_cfa_offset 16
237 ; RV64IZCMP-NEXT: .cfi_offset ra, -16
238 ; RV64IZCMP-NEXT: .cfi_offset s0, -8
239 ; RV64IZCMP-NEXT: addi s0, sp, 16
240 ; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0
241 ; RV64IZCMP-NEXT: slli a0, a0, 32
242 ; RV64IZCMP-NEXT: srli a0, a0, 32
243 ; RV64IZCMP-NEXT: addi a0, a0, 15
244 ; RV64IZCMP-NEXT: andi a0, a0, -16
245 ; RV64IZCMP-NEXT: sub a0, sp, a0
246 ; RV64IZCMP-NEXT: mv sp, a0
247 ; RV64IZCMP-NEXT: call callee_void
248 ; RV64IZCMP-NEXT: li a0, 1
249 ; RV64IZCMP-NEXT: addi sp, s0, -16
250 ; RV64IZCMP-NEXT: cm.popret {ra, s0}, 16
252 ; RV32IZCMP-SR-LABEL: pushpopret1:
253 ; RV32IZCMP-SR: # %bb.0: # %entry
254 ; RV32IZCMP-SR-NEXT: cm.push {ra, s0}, -16
255 ; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
256 ; RV32IZCMP-SR-NEXT: .cfi_offset ra, -8
257 ; RV32IZCMP-SR-NEXT: .cfi_offset s0, -4
258 ; RV32IZCMP-SR-NEXT: addi s0, sp, 16
259 ; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
260 ; RV32IZCMP-SR-NEXT: addi a0, a0, 15
261 ; RV32IZCMP-SR-NEXT: andi a0, a0, -16
262 ; RV32IZCMP-SR-NEXT: sub a0, sp, a0
263 ; RV32IZCMP-SR-NEXT: mv sp, a0
264 ; RV32IZCMP-SR-NEXT: call callee_void
265 ; RV32IZCMP-SR-NEXT: li a0, 1
266 ; RV32IZCMP-SR-NEXT: addi sp, s0, -16
267 ; RV32IZCMP-SR-NEXT: cm.popret {ra, s0}, 16
269 ; RV64IZCMP-SR-LABEL: pushpopret1:
270 ; RV64IZCMP-SR: # %bb.0: # %entry
271 ; RV64IZCMP-SR-NEXT: cm.push {ra, s0}, -16
272 ; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
273 ; RV64IZCMP-SR-NEXT: .cfi_offset ra, -16
274 ; RV64IZCMP-SR-NEXT: .cfi_offset s0, -8
275 ; RV64IZCMP-SR-NEXT: addi s0, sp, 16
276 ; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
277 ; RV64IZCMP-SR-NEXT: slli a0, a0, 32
278 ; RV64IZCMP-SR-NEXT: srli a0, a0, 32
279 ; RV64IZCMP-SR-NEXT: addi a0, a0, 15
280 ; RV64IZCMP-SR-NEXT: andi a0, a0, -16
281 ; RV64IZCMP-SR-NEXT: sub a0, sp, a0
282 ; RV64IZCMP-SR-NEXT: mv sp, a0
283 ; RV64IZCMP-SR-NEXT: call callee_void
284 ; RV64IZCMP-SR-NEXT: li a0, 1
285 ; RV64IZCMP-SR-NEXT: addi sp, s0, -16
286 ; RV64IZCMP-SR-NEXT: cm.popret {ra, s0}, 16
288 ; RV32I-LABEL: pushpopret1:
289 ; RV32I: # %bb.0: # %entry
290 ; RV32I-NEXT: addi sp, sp, -16
291 ; RV32I-NEXT: .cfi_def_cfa_offset 16
292 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
293 ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
294 ; RV32I-NEXT: .cfi_offset ra, -4
295 ; RV32I-NEXT: .cfi_offset s0, -8
296 ; RV32I-NEXT: addi s0, sp, 16
297 ; RV32I-NEXT: .cfi_def_cfa s0, 0
298 ; RV32I-NEXT: addi a0, a0, 15
299 ; RV32I-NEXT: andi a0, a0, -16
300 ; RV32I-NEXT: sub a0, sp, a0
301 ; RV32I-NEXT: mv sp, a0
302 ; RV32I-NEXT: call callee_void
303 ; RV32I-NEXT: li a0, 1
304 ; RV32I-NEXT: addi sp, s0, -16
305 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
306 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
307 ; RV32I-NEXT: addi sp, sp, 16
310 ; RV64I-LABEL: pushpopret1:
311 ; RV64I: # %bb.0: # %entry
312 ; RV64I-NEXT: addi sp, sp, -16
313 ; RV64I-NEXT: .cfi_def_cfa_offset 16
314 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
315 ; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
316 ; RV64I-NEXT: .cfi_offset ra, -8
317 ; RV64I-NEXT: .cfi_offset s0, -16
318 ; RV64I-NEXT: addi s0, sp, 16
319 ; RV64I-NEXT: .cfi_def_cfa s0, 0
320 ; RV64I-NEXT: slli a0, a0, 32
321 ; RV64I-NEXT: srli a0, a0, 32
322 ; RV64I-NEXT: addi a0, a0, 15
323 ; RV64I-NEXT: andi a0, a0, -16
324 ; RV64I-NEXT: sub a0, sp, a0
325 ; RV64I-NEXT: mv sp, a0
326 ; RV64I-NEXT: call callee_void
327 ; RV64I-NEXT: li a0, 1
328 ; RV64I-NEXT: addi sp, s0, -16
329 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
330 ; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
331 ; RV64I-NEXT: addi sp, sp, 16
334 %0 = alloca i8, i32 %size, align 16
335 call void @callee_void(ptr nonnull %0)
  ret i32 1
}
339 define i32 @pushpopretneg1(i32 signext %size) {
340 ; RV32IZCMP-LABEL: pushpopretneg1:
341 ; RV32IZCMP: # %bb.0: # %entry
342 ; RV32IZCMP-NEXT: cm.push {ra, s0}, -16
343 ; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16
344 ; RV32IZCMP-NEXT: .cfi_offset ra, -8
345 ; RV32IZCMP-NEXT: .cfi_offset s0, -4
346 ; RV32IZCMP-NEXT: addi s0, sp, 16
347 ; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0
348 ; RV32IZCMP-NEXT: addi a0, a0, 15
349 ; RV32IZCMP-NEXT: andi a0, a0, -16
350 ; RV32IZCMP-NEXT: sub a0, sp, a0
351 ; RV32IZCMP-NEXT: mv sp, a0
352 ; RV32IZCMP-NEXT: call callee_void
353 ; RV32IZCMP-NEXT: li a0, -1
354 ; RV32IZCMP-NEXT: addi sp, s0, -16
355 ; RV32IZCMP-NEXT: cm.popret {ra, s0}, 16
357 ; RV64IZCMP-LABEL: pushpopretneg1:
358 ; RV64IZCMP: # %bb.0: # %entry
359 ; RV64IZCMP-NEXT: cm.push {ra, s0}, -16
360 ; RV64IZCMP-NEXT: .cfi_def_cfa_offset 16
361 ; RV64IZCMP-NEXT: .cfi_offset ra, -16
362 ; RV64IZCMP-NEXT: .cfi_offset s0, -8
363 ; RV64IZCMP-NEXT: addi s0, sp, 16
364 ; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0
365 ; RV64IZCMP-NEXT: slli a0, a0, 32
366 ; RV64IZCMP-NEXT: srli a0, a0, 32
367 ; RV64IZCMP-NEXT: addi a0, a0, 15
368 ; RV64IZCMP-NEXT: andi a0, a0, -16
369 ; RV64IZCMP-NEXT: sub a0, sp, a0
370 ; RV64IZCMP-NEXT: mv sp, a0
371 ; RV64IZCMP-NEXT: call callee_void
372 ; RV64IZCMP-NEXT: li a0, -1
373 ; RV64IZCMP-NEXT: addi sp, s0, -16
374 ; RV64IZCMP-NEXT: cm.popret {ra, s0}, 16
376 ; RV32IZCMP-SR-LABEL: pushpopretneg1:
377 ; RV32IZCMP-SR: # %bb.0: # %entry
378 ; RV32IZCMP-SR-NEXT: cm.push {ra, s0}, -16
379 ; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
380 ; RV32IZCMP-SR-NEXT: .cfi_offset ra, -8
381 ; RV32IZCMP-SR-NEXT: .cfi_offset s0, -4
382 ; RV32IZCMP-SR-NEXT: addi s0, sp, 16
383 ; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
384 ; RV32IZCMP-SR-NEXT: addi a0, a0, 15
385 ; RV32IZCMP-SR-NEXT: andi a0, a0, -16
386 ; RV32IZCMP-SR-NEXT: sub a0, sp, a0
387 ; RV32IZCMP-SR-NEXT: mv sp, a0
388 ; RV32IZCMP-SR-NEXT: call callee_void
389 ; RV32IZCMP-SR-NEXT: li a0, -1
390 ; RV32IZCMP-SR-NEXT: addi sp, s0, -16
391 ; RV32IZCMP-SR-NEXT: cm.popret {ra, s0}, 16
393 ; RV64IZCMP-SR-LABEL: pushpopretneg1:
394 ; RV64IZCMP-SR: # %bb.0: # %entry
395 ; RV64IZCMP-SR-NEXT: cm.push {ra, s0}, -16
396 ; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
397 ; RV64IZCMP-SR-NEXT: .cfi_offset ra, -16
398 ; RV64IZCMP-SR-NEXT: .cfi_offset s0, -8
399 ; RV64IZCMP-SR-NEXT: addi s0, sp, 16
400 ; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
401 ; RV64IZCMP-SR-NEXT: slli a0, a0, 32
402 ; RV64IZCMP-SR-NEXT: srli a0, a0, 32
403 ; RV64IZCMP-SR-NEXT: addi a0, a0, 15
404 ; RV64IZCMP-SR-NEXT: andi a0, a0, -16
405 ; RV64IZCMP-SR-NEXT: sub a0, sp, a0
406 ; RV64IZCMP-SR-NEXT: mv sp, a0
407 ; RV64IZCMP-SR-NEXT: call callee_void
408 ; RV64IZCMP-SR-NEXT: li a0, -1
409 ; RV64IZCMP-SR-NEXT: addi sp, s0, -16
410 ; RV64IZCMP-SR-NEXT: cm.popret {ra, s0}, 16
412 ; RV32I-LABEL: pushpopretneg1:
413 ; RV32I: # %bb.0: # %entry
414 ; RV32I-NEXT: addi sp, sp, -16
415 ; RV32I-NEXT: .cfi_def_cfa_offset 16
416 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
417 ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
418 ; RV32I-NEXT: .cfi_offset ra, -4
419 ; RV32I-NEXT: .cfi_offset s0, -8
420 ; RV32I-NEXT: addi s0, sp, 16
421 ; RV32I-NEXT: .cfi_def_cfa s0, 0
422 ; RV32I-NEXT: addi a0, a0, 15
423 ; RV32I-NEXT: andi a0, a0, -16
424 ; RV32I-NEXT: sub a0, sp, a0
425 ; RV32I-NEXT: mv sp, a0
426 ; RV32I-NEXT: call callee_void
427 ; RV32I-NEXT: li a0, -1
428 ; RV32I-NEXT: addi sp, s0, -16
429 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
430 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
431 ; RV32I-NEXT: addi sp, sp, 16
434 ; RV64I-LABEL: pushpopretneg1:
435 ; RV64I: # %bb.0: # %entry
436 ; RV64I-NEXT: addi sp, sp, -16
437 ; RV64I-NEXT: .cfi_def_cfa_offset 16
438 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
439 ; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
440 ; RV64I-NEXT: .cfi_offset ra, -8
441 ; RV64I-NEXT: .cfi_offset s0, -16
442 ; RV64I-NEXT: addi s0, sp, 16
443 ; RV64I-NEXT: .cfi_def_cfa s0, 0
444 ; RV64I-NEXT: slli a0, a0, 32
445 ; RV64I-NEXT: srli a0, a0, 32
446 ; RV64I-NEXT: addi a0, a0, 15
447 ; RV64I-NEXT: andi a0, a0, -16
448 ; RV64I-NEXT: sub a0, sp, a0
449 ; RV64I-NEXT: mv sp, a0
450 ; RV64I-NEXT: call callee_void
451 ; RV64I-NEXT: li a0, -1
452 ; RV64I-NEXT: addi sp, s0, -16
453 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
454 ; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
455 ; RV64I-NEXT: addi sp, sp, 16
458 %0 = alloca i8, i32 %size, align 16
459 call void @callee_void(ptr nonnull %0)
  ret i32 -1
}
463 define i32 @pushpopret2(i32 signext %size) {
464 ; RV32IZCMP-LABEL: pushpopret2:
465 ; RV32IZCMP: # %bb.0: # %entry
466 ; RV32IZCMP-NEXT: cm.push {ra, s0}, -16
467 ; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16
468 ; RV32IZCMP-NEXT: .cfi_offset ra, -8
469 ; RV32IZCMP-NEXT: .cfi_offset s0, -4
470 ; RV32IZCMP-NEXT: addi s0, sp, 16
471 ; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0
472 ; RV32IZCMP-NEXT: addi a0, a0, 15
473 ; RV32IZCMP-NEXT: andi a0, a0, -16
474 ; RV32IZCMP-NEXT: sub a0, sp, a0
475 ; RV32IZCMP-NEXT: mv sp, a0
476 ; RV32IZCMP-NEXT: call callee_void
477 ; RV32IZCMP-NEXT: li a0, 2
478 ; RV32IZCMP-NEXT: addi sp, s0, -16
479 ; RV32IZCMP-NEXT: cm.popret {ra, s0}, 16
481 ; RV64IZCMP-LABEL: pushpopret2:
482 ; RV64IZCMP: # %bb.0: # %entry
483 ; RV64IZCMP-NEXT: cm.push {ra, s0}, -16
484 ; RV64IZCMP-NEXT: .cfi_def_cfa_offset 16
485 ; RV64IZCMP-NEXT: .cfi_offset ra, -16
486 ; RV64IZCMP-NEXT: .cfi_offset s0, -8
487 ; RV64IZCMP-NEXT: addi s0, sp, 16
488 ; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0
489 ; RV64IZCMP-NEXT: slli a0, a0, 32
490 ; RV64IZCMP-NEXT: srli a0, a0, 32
491 ; RV64IZCMP-NEXT: addi a0, a0, 15
492 ; RV64IZCMP-NEXT: andi a0, a0, -16
493 ; RV64IZCMP-NEXT: sub a0, sp, a0
494 ; RV64IZCMP-NEXT: mv sp, a0
495 ; RV64IZCMP-NEXT: call callee_void
496 ; RV64IZCMP-NEXT: li a0, 2
497 ; RV64IZCMP-NEXT: addi sp, s0, -16
498 ; RV64IZCMP-NEXT: cm.popret {ra, s0}, 16
500 ; RV32IZCMP-SR-LABEL: pushpopret2:
501 ; RV32IZCMP-SR: # %bb.0: # %entry
502 ; RV32IZCMP-SR-NEXT: cm.push {ra, s0}, -16
503 ; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
504 ; RV32IZCMP-SR-NEXT: .cfi_offset ra, -8
505 ; RV32IZCMP-SR-NEXT: .cfi_offset s0, -4
506 ; RV32IZCMP-SR-NEXT: addi s0, sp, 16
507 ; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
508 ; RV32IZCMP-SR-NEXT: addi a0, a0, 15
509 ; RV32IZCMP-SR-NEXT: andi a0, a0, -16
510 ; RV32IZCMP-SR-NEXT: sub a0, sp, a0
511 ; RV32IZCMP-SR-NEXT: mv sp, a0
512 ; RV32IZCMP-SR-NEXT: call callee_void
513 ; RV32IZCMP-SR-NEXT: li a0, 2
514 ; RV32IZCMP-SR-NEXT: addi sp, s0, -16
515 ; RV32IZCMP-SR-NEXT: cm.popret {ra, s0}, 16
517 ; RV64IZCMP-SR-LABEL: pushpopret2:
518 ; RV64IZCMP-SR: # %bb.0: # %entry
519 ; RV64IZCMP-SR-NEXT: cm.push {ra, s0}, -16
520 ; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
521 ; RV64IZCMP-SR-NEXT: .cfi_offset ra, -16
522 ; RV64IZCMP-SR-NEXT: .cfi_offset s0, -8
523 ; RV64IZCMP-SR-NEXT: addi s0, sp, 16
524 ; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
525 ; RV64IZCMP-SR-NEXT: slli a0, a0, 32
526 ; RV64IZCMP-SR-NEXT: srli a0, a0, 32
527 ; RV64IZCMP-SR-NEXT: addi a0, a0, 15
528 ; RV64IZCMP-SR-NEXT: andi a0, a0, -16
529 ; RV64IZCMP-SR-NEXT: sub a0, sp, a0
530 ; RV64IZCMP-SR-NEXT: mv sp, a0
531 ; RV64IZCMP-SR-NEXT: call callee_void
532 ; RV64IZCMP-SR-NEXT: li a0, 2
533 ; RV64IZCMP-SR-NEXT: addi sp, s0, -16
534 ; RV64IZCMP-SR-NEXT: cm.popret {ra, s0}, 16
536 ; RV32I-LABEL: pushpopret2:
537 ; RV32I: # %bb.0: # %entry
538 ; RV32I-NEXT: addi sp, sp, -16
539 ; RV32I-NEXT: .cfi_def_cfa_offset 16
540 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
541 ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
542 ; RV32I-NEXT: .cfi_offset ra, -4
543 ; RV32I-NEXT: .cfi_offset s0, -8
544 ; RV32I-NEXT: addi s0, sp, 16
545 ; RV32I-NEXT: .cfi_def_cfa s0, 0
546 ; RV32I-NEXT: addi a0, a0, 15
547 ; RV32I-NEXT: andi a0, a0, -16
548 ; RV32I-NEXT: sub a0, sp, a0
549 ; RV32I-NEXT: mv sp, a0
550 ; RV32I-NEXT: call callee_void
551 ; RV32I-NEXT: li a0, 2
552 ; RV32I-NEXT: addi sp, s0, -16
553 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
554 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
555 ; RV32I-NEXT: addi sp, sp, 16
558 ; RV64I-LABEL: pushpopret2:
559 ; RV64I: # %bb.0: # %entry
560 ; RV64I-NEXT: addi sp, sp, -16
561 ; RV64I-NEXT: .cfi_def_cfa_offset 16
562 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
563 ; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
564 ; RV64I-NEXT: .cfi_offset ra, -8
565 ; RV64I-NEXT: .cfi_offset s0, -16
566 ; RV64I-NEXT: addi s0, sp, 16
567 ; RV64I-NEXT: .cfi_def_cfa s0, 0
568 ; RV64I-NEXT: slli a0, a0, 32
569 ; RV64I-NEXT: srli a0, a0, 32
570 ; RV64I-NEXT: addi a0, a0, 15
571 ; RV64I-NEXT: andi a0, a0, -16
572 ; RV64I-NEXT: sub a0, sp, a0
573 ; RV64I-NEXT: mv sp, a0
574 ; RV64I-NEXT: call callee_void
575 ; RV64I-NEXT: li a0, 2
576 ; RV64I-NEXT: addi sp, s0, -16
577 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
578 ; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
579 ; RV64I-NEXT: addi sp, sp, 16
582 %0 = alloca i8, i32 %size, align 16
583 call void @callee_void(ptr nonnull %0)
  ret i32 2
}
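; Check that a tail-calling function restores its frame with cm.pop and then
; uses a plain tail jump instead of cm.popret.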
587 define dso_local i32 @tailcall(i32 signext %size) local_unnamed_addr #0 {
588 ; RV32IZCMP-LABEL: tailcall:
589 ; RV32IZCMP: # %bb.0: # %entry
590 ; RV32IZCMP-NEXT: cm.push {ra, s0}, -16
591 ; RV32IZCMP-NEXT: .cfi_def_cfa_offset 16
592 ; RV32IZCMP-NEXT: .cfi_offset ra, -8
593 ; RV32IZCMP-NEXT: .cfi_offset s0, -4
594 ; RV32IZCMP-NEXT: addi s0, sp, 16
595 ; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0
596 ; RV32IZCMP-NEXT: addi a0, a0, 15
597 ; RV32IZCMP-NEXT: andi a0, a0, -16
598 ; RV32IZCMP-NEXT: sub a0, sp, a0
599 ; RV32IZCMP-NEXT: mv sp, a0
600 ; RV32IZCMP-NEXT: addi sp, s0, -16
601 ; RV32IZCMP-NEXT: cm.pop {ra, s0}, 16
602 ; RV32IZCMP-NEXT: tail callee
604 ; RV64IZCMP-LABEL: tailcall:
605 ; RV64IZCMP: # %bb.0: # %entry
606 ; RV64IZCMP-NEXT: cm.push {ra, s0}, -16
607 ; RV64IZCMP-NEXT: .cfi_def_cfa_offset 16
608 ; RV64IZCMP-NEXT: .cfi_offset ra, -16
609 ; RV64IZCMP-NEXT: .cfi_offset s0, -8
610 ; RV64IZCMP-NEXT: addi s0, sp, 16
611 ; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0
612 ; RV64IZCMP-NEXT: slli a0, a0, 32
613 ; RV64IZCMP-NEXT: srli a0, a0, 32
614 ; RV64IZCMP-NEXT: addi a0, a0, 15
615 ; RV64IZCMP-NEXT: andi a0, a0, -16
616 ; RV64IZCMP-NEXT: sub a0, sp, a0
617 ; RV64IZCMP-NEXT: mv sp, a0
618 ; RV64IZCMP-NEXT: addi sp, s0, -16
619 ; RV64IZCMP-NEXT: cm.pop {ra, s0}, 16
620 ; RV64IZCMP-NEXT: tail callee
622 ; RV32IZCMP-SR-LABEL: tailcall:
623 ; RV32IZCMP-SR: # %bb.0: # %entry
624 ; RV32IZCMP-SR-NEXT: cm.push {ra, s0}, -16
625 ; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
626 ; RV32IZCMP-SR-NEXT: .cfi_offset ra, -8
627 ; RV32IZCMP-SR-NEXT: .cfi_offset s0, -4
628 ; RV32IZCMP-SR-NEXT: addi s0, sp, 16
629 ; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
630 ; RV32IZCMP-SR-NEXT: addi a0, a0, 15
631 ; RV32IZCMP-SR-NEXT: andi a0, a0, -16
632 ; RV32IZCMP-SR-NEXT: sub a0, sp, a0
633 ; RV32IZCMP-SR-NEXT: mv sp, a0
634 ; RV32IZCMP-SR-NEXT: addi sp, s0, -16
635 ; RV32IZCMP-SR-NEXT: cm.pop {ra, s0}, 16
636 ; RV32IZCMP-SR-NEXT: tail callee
638 ; RV64IZCMP-SR-LABEL: tailcall:
639 ; RV64IZCMP-SR: # %bb.0: # %entry
640 ; RV64IZCMP-SR-NEXT: cm.push {ra, s0}, -16
641 ; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 16
642 ; RV64IZCMP-SR-NEXT: .cfi_offset ra, -16
643 ; RV64IZCMP-SR-NEXT: .cfi_offset s0, -8
644 ; RV64IZCMP-SR-NEXT: addi s0, sp, 16
645 ; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
646 ; RV64IZCMP-SR-NEXT: slli a0, a0, 32
647 ; RV64IZCMP-SR-NEXT: srli a0, a0, 32
648 ; RV64IZCMP-SR-NEXT: addi a0, a0, 15
649 ; RV64IZCMP-SR-NEXT: andi a0, a0, -16
650 ; RV64IZCMP-SR-NEXT: sub a0, sp, a0
651 ; RV64IZCMP-SR-NEXT: mv sp, a0
652 ; RV64IZCMP-SR-NEXT: addi sp, s0, -16
653 ; RV64IZCMP-SR-NEXT: cm.pop {ra, s0}, 16
654 ; RV64IZCMP-SR-NEXT: tail callee
656 ; RV32I-LABEL: tailcall:
657 ; RV32I: # %bb.0: # %entry
658 ; RV32I-NEXT: addi sp, sp, -16
659 ; RV32I-NEXT: .cfi_def_cfa_offset 16
660 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
661 ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
662 ; RV32I-NEXT: .cfi_offset ra, -4
663 ; RV32I-NEXT: .cfi_offset s0, -8
664 ; RV32I-NEXT: addi s0, sp, 16
665 ; RV32I-NEXT: .cfi_def_cfa s0, 0
666 ; RV32I-NEXT: addi a0, a0, 15
667 ; RV32I-NEXT: andi a0, a0, -16
668 ; RV32I-NEXT: sub a0, sp, a0
669 ; RV32I-NEXT: mv sp, a0
670 ; RV32I-NEXT: addi sp, s0, -16
671 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
672 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
673 ; RV32I-NEXT: addi sp, sp, 16
674 ; RV32I-NEXT: tail callee
676 ; RV64I-LABEL: tailcall:
677 ; RV64I: # %bb.0: # %entry
678 ; RV64I-NEXT: addi sp, sp, -16
679 ; RV64I-NEXT: .cfi_def_cfa_offset 16
680 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
681 ; RV64I-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
682 ; RV64I-NEXT: .cfi_offset ra, -8
683 ; RV64I-NEXT: .cfi_offset s0, -16
684 ; RV64I-NEXT: addi s0, sp, 16
685 ; RV64I-NEXT: .cfi_def_cfa s0, 0
686 ; RV64I-NEXT: slli a0, a0, 32
687 ; RV64I-NEXT: srli a0, a0, 32
688 ; RV64I-NEXT: addi a0, a0, 15
689 ; RV64I-NEXT: andi a0, a0, -16
690 ; RV64I-NEXT: sub a0, sp, a0
691 ; RV64I-NEXT: mv sp, a0
692 ; RV64I-NEXT: addi sp, s0, -16
693 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
694 ; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
695 ; RV64I-NEXT: addi sp, sp, 16
696 ; RV64I-NEXT: tail callee
698 %0 = alloca i8, i32 %size, align 16
699 %1 = tail call i32 @callee(ptr nonnull %0)
  ret i32 %1
}
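; Check that values of @var that are live across the call are kept in the
; s-registers saved by cm.push {ra, s0-s8}.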
703 @var = global [5 x i32] zeroinitializer
704 define i32 @nocompress(i32 signext %size) {
705 ; RV32IZCMP-LABEL: nocompress:
706 ; RV32IZCMP: # %bb.0: # %entry
707 ; RV32IZCMP-NEXT: cm.push {ra, s0-s8}, -48
708 ; RV32IZCMP-NEXT: .cfi_def_cfa_offset 48
709 ; RV32IZCMP-NEXT: .cfi_offset ra, -40
710 ; RV32IZCMP-NEXT: .cfi_offset s0, -36
711 ; RV32IZCMP-NEXT: .cfi_offset s1, -32
712 ; RV32IZCMP-NEXT: .cfi_offset s2, -28
713 ; RV32IZCMP-NEXT: .cfi_offset s3, -24
714 ; RV32IZCMP-NEXT: .cfi_offset s4, -20
715 ; RV32IZCMP-NEXT: .cfi_offset s5, -16
716 ; RV32IZCMP-NEXT: .cfi_offset s6, -12
717 ; RV32IZCMP-NEXT: .cfi_offset s7, -8
718 ; RV32IZCMP-NEXT: .cfi_offset s8, -4
719 ; RV32IZCMP-NEXT: addi s0, sp, 48
720 ; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0
721 ; RV32IZCMP-NEXT: addi a0, a0, 15
722 ; RV32IZCMP-NEXT: andi a0, a0, -16
723 ; RV32IZCMP-NEXT: sub s2, sp, a0
724 ; RV32IZCMP-NEXT: mv sp, s2
725 ; RV32IZCMP-NEXT: lui s1, %hi(var)
726 ; RV32IZCMP-NEXT: lw s3, %lo(var)(s1)
727 ; RV32IZCMP-NEXT: lw s4, %lo(var+4)(s1)
728 ; RV32IZCMP-NEXT: lw s5, %lo(var+8)(s1)
729 ; RV32IZCMP-NEXT: lw s6, %lo(var+12)(s1)
730 ; RV32IZCMP-NEXT: addi s7, s1, %lo(var)
731 ; RV32IZCMP-NEXT: lw s8, 16(s7)
732 ; RV32IZCMP-NEXT: mv a0, s2
733 ; RV32IZCMP-NEXT: call callee_void
734 ; RV32IZCMP-NEXT: sw s8, 16(s7)
735 ; RV32IZCMP-NEXT: sw s6, %lo(var+12)(s1)
736 ; RV32IZCMP-NEXT: sw s5, %lo(var+8)(s1)
737 ; RV32IZCMP-NEXT: sw s4, %lo(var+4)(s1)
738 ; RV32IZCMP-NEXT: sw s3, %lo(var)(s1)
739 ; RV32IZCMP-NEXT: mv a0, s2
740 ; RV32IZCMP-NEXT: addi sp, s0, -48
741 ; RV32IZCMP-NEXT: cm.pop {ra, s0-s8}, 48
742 ; RV32IZCMP-NEXT: tail callee
744 ; RV64IZCMP-LABEL: nocompress:
745 ; RV64IZCMP: # %bb.0: # %entry
746 ; RV64IZCMP-NEXT: cm.push {ra, s0-s8}, -80
747 ; RV64IZCMP-NEXT: .cfi_def_cfa_offset 80
748 ; RV64IZCMP-NEXT: .cfi_offset ra, -80
749 ; RV64IZCMP-NEXT: .cfi_offset s0, -72
750 ; RV64IZCMP-NEXT: .cfi_offset s1, -64
751 ; RV64IZCMP-NEXT: .cfi_offset s2, -56
752 ; RV64IZCMP-NEXT: .cfi_offset s3, -48
753 ; RV64IZCMP-NEXT: .cfi_offset s4, -40
754 ; RV64IZCMP-NEXT: .cfi_offset s5, -32
755 ; RV64IZCMP-NEXT: .cfi_offset s6, -24
756 ; RV64IZCMP-NEXT: .cfi_offset s7, -16
757 ; RV64IZCMP-NEXT: .cfi_offset s8, -8
758 ; RV64IZCMP-NEXT: addi s0, sp, 80
759 ; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0
760 ; RV64IZCMP-NEXT: slli a0, a0, 32
761 ; RV64IZCMP-NEXT: srli a0, a0, 32
762 ; RV64IZCMP-NEXT: addi a0, a0, 15
763 ; RV64IZCMP-NEXT: andi a0, a0, -16
764 ; RV64IZCMP-NEXT: sub s2, sp, a0
765 ; RV64IZCMP-NEXT: mv sp, s2
766 ; RV64IZCMP-NEXT: lui s1, %hi(var)
767 ; RV64IZCMP-NEXT: lw s3, %lo(var)(s1)
768 ; RV64IZCMP-NEXT: lw s4, %lo(var+4)(s1)
769 ; RV64IZCMP-NEXT: lw s5, %lo(var+8)(s1)
770 ; RV64IZCMP-NEXT: lw s6, %lo(var+12)(s1)
771 ; RV64IZCMP-NEXT: addi s7, s1, %lo(var)
772 ; RV64IZCMP-NEXT: lw s8, 16(s7)
773 ; RV64IZCMP-NEXT: mv a0, s2
774 ; RV64IZCMP-NEXT: call callee_void
775 ; RV64IZCMP-NEXT: sw s8, 16(s7)
776 ; RV64IZCMP-NEXT: sw s6, %lo(var+12)(s1)
777 ; RV64IZCMP-NEXT: sw s5, %lo(var+8)(s1)
778 ; RV64IZCMP-NEXT: sw s4, %lo(var+4)(s1)
779 ; RV64IZCMP-NEXT: sw s3, %lo(var)(s1)
780 ; RV64IZCMP-NEXT: mv a0, s2
781 ; RV64IZCMP-NEXT: addi sp, s0, -80
782 ; RV64IZCMP-NEXT: cm.pop {ra, s0-s8}, 80
783 ; RV64IZCMP-NEXT: tail callee
785 ; RV32IZCMP-SR-LABEL: nocompress:
786 ; RV32IZCMP-SR: # %bb.0: # %entry
787 ; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s8}, -48
788 ; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 48
789 ; RV32IZCMP-SR-NEXT: .cfi_offset ra, -40
790 ; RV32IZCMP-SR-NEXT: .cfi_offset s0, -36
791 ; RV32IZCMP-SR-NEXT: .cfi_offset s1, -32
792 ; RV32IZCMP-SR-NEXT: .cfi_offset s2, -28
793 ; RV32IZCMP-SR-NEXT: .cfi_offset s3, -24
794 ; RV32IZCMP-SR-NEXT: .cfi_offset s4, -20
795 ; RV32IZCMP-SR-NEXT: .cfi_offset s5, -16
796 ; RV32IZCMP-SR-NEXT: .cfi_offset s6, -12
797 ; RV32IZCMP-SR-NEXT: .cfi_offset s7, -8
798 ; RV32IZCMP-SR-NEXT: .cfi_offset s8, -4
799 ; RV32IZCMP-SR-NEXT: addi s0, sp, 48
800 ; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
801 ; RV32IZCMP-SR-NEXT: addi a0, a0, 15
802 ; RV32IZCMP-SR-NEXT: andi a0, a0, -16
803 ; RV32IZCMP-SR-NEXT: sub s2, sp, a0
804 ; RV32IZCMP-SR-NEXT: mv sp, s2
805 ; RV32IZCMP-SR-NEXT: lui s1, %hi(var)
806 ; RV32IZCMP-SR-NEXT: lw s3, %lo(var)(s1)
807 ; RV32IZCMP-SR-NEXT: lw s4, %lo(var+4)(s1)
808 ; RV32IZCMP-SR-NEXT: lw s5, %lo(var+8)(s1)
809 ; RV32IZCMP-SR-NEXT: lw s6, %lo(var+12)(s1)
810 ; RV32IZCMP-SR-NEXT: addi s7, s1, %lo(var)
811 ; RV32IZCMP-SR-NEXT: lw s8, 16(s7)
812 ; RV32IZCMP-SR-NEXT: mv a0, s2
813 ; RV32IZCMP-SR-NEXT: call callee_void
814 ; RV32IZCMP-SR-NEXT: sw s8, 16(s7)
815 ; RV32IZCMP-SR-NEXT: sw s6, %lo(var+12)(s1)
816 ; RV32IZCMP-SR-NEXT: sw s5, %lo(var+8)(s1)
817 ; RV32IZCMP-SR-NEXT: sw s4, %lo(var+4)(s1)
818 ; RV32IZCMP-SR-NEXT: sw s3, %lo(var)(s1)
819 ; RV32IZCMP-SR-NEXT: mv a0, s2
820 ; RV32IZCMP-SR-NEXT: addi sp, s0, -48
821 ; RV32IZCMP-SR-NEXT: cm.pop {ra, s0-s8}, 48
822 ; RV32IZCMP-SR-NEXT: tail callee
824 ; RV64IZCMP-SR-LABEL: nocompress:
825 ; RV64IZCMP-SR: # %bb.0: # %entry
826 ; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s8}, -80
827 ; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 80
828 ; RV64IZCMP-SR-NEXT: .cfi_offset ra, -80
829 ; RV64IZCMP-SR-NEXT: .cfi_offset s0, -72
830 ; RV64IZCMP-SR-NEXT: .cfi_offset s1, -64
831 ; RV64IZCMP-SR-NEXT: .cfi_offset s2, -56
832 ; RV64IZCMP-SR-NEXT: .cfi_offset s3, -48
833 ; RV64IZCMP-SR-NEXT: .cfi_offset s4, -40
834 ; RV64IZCMP-SR-NEXT: .cfi_offset s5, -32
835 ; RV64IZCMP-SR-NEXT: .cfi_offset s6, -24
836 ; RV64IZCMP-SR-NEXT: .cfi_offset s7, -16
837 ; RV64IZCMP-SR-NEXT: .cfi_offset s8, -8
838 ; RV64IZCMP-SR-NEXT: addi s0, sp, 80
839 ; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
840 ; RV64IZCMP-SR-NEXT: slli a0, a0, 32
841 ; RV64IZCMP-SR-NEXT: srli a0, a0, 32
842 ; RV64IZCMP-SR-NEXT: addi a0, a0, 15
843 ; RV64IZCMP-SR-NEXT: andi a0, a0, -16
844 ; RV64IZCMP-SR-NEXT: sub s2, sp, a0
845 ; RV64IZCMP-SR-NEXT: mv sp, s2
846 ; RV64IZCMP-SR-NEXT: lui s1, %hi(var)
847 ; RV64IZCMP-SR-NEXT: lw s3, %lo(var)(s1)
848 ; RV64IZCMP-SR-NEXT: lw s4, %lo(var+4)(s1)
849 ; RV64IZCMP-SR-NEXT: lw s5, %lo(var+8)(s1)
850 ; RV64IZCMP-SR-NEXT: lw s6, %lo(var+12)(s1)
851 ; RV64IZCMP-SR-NEXT: addi s7, s1, %lo(var)
852 ; RV64IZCMP-SR-NEXT: lw s8, 16(s7)
853 ; RV64IZCMP-SR-NEXT: mv a0, s2
854 ; RV64IZCMP-SR-NEXT: call callee_void
855 ; RV64IZCMP-SR-NEXT: sw s8, 16(s7)
856 ; RV64IZCMP-SR-NEXT: sw s6, %lo(var+12)(s1)
857 ; RV64IZCMP-SR-NEXT: sw s5, %lo(var+8)(s1)
858 ; RV64IZCMP-SR-NEXT: sw s4, %lo(var+4)(s1)
859 ; RV64IZCMP-SR-NEXT: sw s3, %lo(var)(s1)
860 ; RV64IZCMP-SR-NEXT: mv a0, s2
861 ; RV64IZCMP-SR-NEXT: addi sp, s0, -80
862 ; RV64IZCMP-SR-NEXT: cm.pop {ra, s0-s8}, 80
863 ; RV64IZCMP-SR-NEXT: tail callee
865 ; RV32I-LABEL: nocompress:
866 ; RV32I: # %bb.0: # %entry
867 ; RV32I-NEXT: addi sp, sp, -48
868 ; RV32I-NEXT: .cfi_def_cfa_offset 48
869 ; RV32I-NEXT: sw ra, 44(sp) # 4-byte Folded Spill
870 ; RV32I-NEXT: sw s0, 40(sp) # 4-byte Folded Spill
871 ; RV32I-NEXT: sw s1, 36(sp) # 4-byte Folded Spill
872 ; RV32I-NEXT: sw s2, 32(sp) # 4-byte Folded Spill
873 ; RV32I-NEXT: sw s3, 28(sp) # 4-byte Folded Spill
874 ; RV32I-NEXT: sw s4, 24(sp) # 4-byte Folded Spill
875 ; RV32I-NEXT: sw s5, 20(sp) # 4-byte Folded Spill
876 ; RV32I-NEXT: sw s6, 16(sp) # 4-byte Folded Spill
877 ; RV32I-NEXT: sw s7, 12(sp) # 4-byte Folded Spill
878 ; RV32I-NEXT: sw s8, 8(sp) # 4-byte Folded Spill
879 ; RV32I-NEXT: .cfi_offset ra, -4
880 ; RV32I-NEXT: .cfi_offset s0, -8
881 ; RV32I-NEXT: .cfi_offset s1, -12
882 ; RV32I-NEXT: .cfi_offset s2, -16
883 ; RV32I-NEXT: .cfi_offset s3, -20
884 ; RV32I-NEXT: .cfi_offset s4, -24
885 ; RV32I-NEXT: .cfi_offset s5, -28
886 ; RV32I-NEXT: .cfi_offset s6, -32
887 ; RV32I-NEXT: .cfi_offset s7, -36
888 ; RV32I-NEXT: .cfi_offset s8, -40
889 ; RV32I-NEXT: addi s0, sp, 48
890 ; RV32I-NEXT: .cfi_def_cfa s0, 0
891 ; RV32I-NEXT: addi a0, a0, 15
892 ; RV32I-NEXT: andi a0, a0, -16
893 ; RV32I-NEXT: sub s1, sp, a0
894 ; RV32I-NEXT: mv sp, s1
895 ; RV32I-NEXT: lui s2, %hi(var)
896 ; RV32I-NEXT: lw s3, %lo(var)(s2)
897 ; RV32I-NEXT: lw s4, %lo(var+4)(s2)
898 ; RV32I-NEXT: lw s5, %lo(var+8)(s2)
899 ; RV32I-NEXT: lw s6, %lo(var+12)(s2)
900 ; RV32I-NEXT: addi s7, s2, %lo(var)
901 ; RV32I-NEXT: lw s8, 16(s7)
902 ; RV32I-NEXT: mv a0, s1
903 ; RV32I-NEXT: call callee_void
904 ; RV32I-NEXT: sw s8, 16(s7)
905 ; RV32I-NEXT: sw s6, %lo(var+12)(s2)
906 ; RV32I-NEXT: sw s5, %lo(var+8)(s2)
907 ; RV32I-NEXT: sw s4, %lo(var+4)(s2)
908 ; RV32I-NEXT: sw s3, %lo(var)(s2)
909 ; RV32I-NEXT: mv a0, s1
910 ; RV32I-NEXT: addi sp, s0, -48
911 ; RV32I-NEXT: lw ra, 44(sp) # 4-byte Folded Reload
912 ; RV32I-NEXT: lw s0, 40(sp) # 4-byte Folded Reload
913 ; RV32I-NEXT: lw s1, 36(sp) # 4-byte Folded Reload
914 ; RV32I-NEXT: lw s2, 32(sp) # 4-byte Folded Reload
915 ; RV32I-NEXT: lw s3, 28(sp) # 4-byte Folded Reload
916 ; RV32I-NEXT: lw s4, 24(sp) # 4-byte Folded Reload
917 ; RV32I-NEXT: lw s5, 20(sp) # 4-byte Folded Reload
918 ; RV32I-NEXT: lw s6, 16(sp) # 4-byte Folded Reload
919 ; RV32I-NEXT: lw s7, 12(sp) # 4-byte Folded Reload
920 ; RV32I-NEXT: lw s8, 8(sp) # 4-byte Folded Reload
921 ; RV32I-NEXT: addi sp, sp, 48
922 ; RV32I-NEXT: tail callee
924 ; RV64I-LABEL: nocompress:
925 ; RV64I: # %bb.0: # %entry
926 ; RV64I-NEXT: addi sp, sp, -80
927 ; RV64I-NEXT: .cfi_def_cfa_offset 80
928 ; RV64I-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
929 ; RV64I-NEXT: sd s0, 64(sp) # 8-byte Folded Spill
930 ; RV64I-NEXT: sd s1, 56(sp) # 8-byte Folded Spill
931 ; RV64I-NEXT: sd s2, 48(sp) # 8-byte Folded Spill
932 ; RV64I-NEXT: sd s3, 40(sp) # 8-byte Folded Spill
933 ; RV64I-NEXT: sd s4, 32(sp) # 8-byte Folded Spill
934 ; RV64I-NEXT: sd s5, 24(sp) # 8-byte Folded Spill
935 ; RV64I-NEXT: sd s6, 16(sp) # 8-byte Folded Spill
936 ; RV64I-NEXT: sd s7, 8(sp) # 8-byte Folded Spill
937 ; RV64I-NEXT: sd s8, 0(sp) # 8-byte Folded Spill
938 ; RV64I-NEXT: .cfi_offset ra, -8
939 ; RV64I-NEXT: .cfi_offset s0, -16
940 ; RV64I-NEXT: .cfi_offset s1, -24
941 ; RV64I-NEXT: .cfi_offset s2, -32
942 ; RV64I-NEXT: .cfi_offset s3, -40
943 ; RV64I-NEXT: .cfi_offset s4, -48
944 ; RV64I-NEXT: .cfi_offset s5, -56
945 ; RV64I-NEXT: .cfi_offset s6, -64
946 ; RV64I-NEXT: .cfi_offset s7, -72
947 ; RV64I-NEXT: .cfi_offset s8, -80
948 ; RV64I-NEXT: addi s0, sp, 80
949 ; RV64I-NEXT: .cfi_def_cfa s0, 0
950 ; RV64I-NEXT: slli a0, a0, 32
951 ; RV64I-NEXT: srli a0, a0, 32
952 ; RV64I-NEXT: addi a0, a0, 15
953 ; RV64I-NEXT: andi a0, a0, -16
954 ; RV64I-NEXT: sub s1, sp, a0
955 ; RV64I-NEXT: mv sp, s1
956 ; RV64I-NEXT: lui s2, %hi(var)
957 ; RV64I-NEXT: lw s3, %lo(var)(s2)
958 ; RV64I-NEXT: lw s4, %lo(var+4)(s2)
959 ; RV64I-NEXT: lw s5, %lo(var+8)(s2)
960 ; RV64I-NEXT: lw s6, %lo(var+12)(s2)
961 ; RV64I-NEXT: addi s7, s2, %lo(var)
962 ; RV64I-NEXT: lw s8, 16(s7)
963 ; RV64I-NEXT: mv a0, s1
964 ; RV64I-NEXT: call callee_void
965 ; RV64I-NEXT: sw s8, 16(s7)
966 ; RV64I-NEXT: sw s6, %lo(var+12)(s2)
967 ; RV64I-NEXT: sw s5, %lo(var+8)(s2)
968 ; RV64I-NEXT: sw s4, %lo(var+4)(s2)
969 ; RV64I-NEXT: sw s3, %lo(var)(s2)
970 ; RV64I-NEXT: mv a0, s1
971 ; RV64I-NEXT: addi sp, s0, -80
972 ; RV64I-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
973 ; RV64I-NEXT: ld s0, 64(sp) # 8-byte Folded Reload
974 ; RV64I-NEXT: ld s1, 56(sp) # 8-byte Folded Reload
975 ; RV64I-NEXT: ld s2, 48(sp) # 8-byte Folded Reload
976 ; RV64I-NEXT: ld s3, 40(sp) # 8-byte Folded Reload
977 ; RV64I-NEXT: ld s4, 32(sp) # 8-byte Folded Reload
978 ; RV64I-NEXT: ld s5, 24(sp) # 8-byte Folded Reload
979 ; RV64I-NEXT: ld s6, 16(sp) # 8-byte Folded Reload
980 ; RV64I-NEXT: ld s7, 8(sp) # 8-byte Folded Reload
981 ; RV64I-NEXT: ld s8, 0(sp) # 8-byte Folded Reload
982 ; RV64I-NEXT: addi sp, sp, 80
983 ; RV64I-NEXT: tail callee
985 %0 = alloca i8, i32 %size, align 16
986 %val = load [5 x i32], ptr @var
987 call void @callee_void(ptr nonnull %0)
988 store volatile [5 x i32] %val, ptr @var
989 %1 = tail call i32 @callee(ptr nonnull %0)
  ret i32 %1
}
993 ; Check that functions with varargs do not use save/restore code
995 declare void @llvm.va_start(ptr)
996 declare void @llvm.va_end(ptr)
998 define i32 @varargs(ptr %fmt, ...) nounwind {
999 ; RV32IZCMP-LABEL: varargs:
1000 ; RV32IZCMP: # %bb.0:
1001 ; RV32IZCMP-NEXT: addi sp, sp, -48
1002 ; RV32IZCMP-NEXT: mv a0, a1
1003 ; RV32IZCMP-NEXT: sw a7, 44(sp)
1004 ; RV32IZCMP-NEXT: sw a6, 40(sp)
1005 ; RV32IZCMP-NEXT: sw a5, 36(sp)
1006 ; RV32IZCMP-NEXT: sw a4, 32(sp)
1007 ; RV32IZCMP-NEXT: sw a3, 28(sp)
1008 ; RV32IZCMP-NEXT: sw a2, 24(sp)
1009 ; RV32IZCMP-NEXT: sw a1, 20(sp)
1010 ; RV32IZCMP-NEXT: addi a1, sp, 24
1011 ; RV32IZCMP-NEXT: sw a1, 12(sp)
1012 ; RV32IZCMP-NEXT: addi sp, sp, 48
1013 ; RV32IZCMP-NEXT: ret
1015 ; RV64IZCMP-LABEL: varargs:
1016 ; RV64IZCMP: # %bb.0:
1017 ; RV64IZCMP-NEXT: addi sp, sp, -80
1018 ; RV64IZCMP-NEXT: sd a1, 24(sp)
1019 ; RV64IZCMP-NEXT: sd a7, 72(sp)
1020 ; RV64IZCMP-NEXT: sd a6, 64(sp)
1021 ; RV64IZCMP-NEXT: addi a0, sp, 28
1022 ; RV64IZCMP-NEXT: sd a0, 8(sp)
1023 ; RV64IZCMP-NEXT: lw a0, 24(sp)
1024 ; RV64IZCMP-NEXT: sd a5, 56(sp)
1025 ; RV64IZCMP-NEXT: sd a4, 48(sp)
1026 ; RV64IZCMP-NEXT: sd a3, 40(sp)
1027 ; RV64IZCMP-NEXT: sd a2, 32(sp)
1028 ; RV64IZCMP-NEXT: addi sp, sp, 80
1029 ; RV64IZCMP-NEXT: ret
1031 ; RV32IZCMP-SR-LABEL: varargs:
1032 ; RV32IZCMP-SR: # %bb.0:
1033 ; RV32IZCMP-SR-NEXT: addi sp, sp, -48
1034 ; RV32IZCMP-SR-NEXT: mv a0, a1
1035 ; RV32IZCMP-SR-NEXT: sw a7, 44(sp)
1036 ; RV32IZCMP-SR-NEXT: sw a6, 40(sp)
1037 ; RV32IZCMP-SR-NEXT: sw a5, 36(sp)
1038 ; RV32IZCMP-SR-NEXT: sw a4, 32(sp)
1039 ; RV32IZCMP-SR-NEXT: sw a3, 28(sp)
1040 ; RV32IZCMP-SR-NEXT: sw a2, 24(sp)
1041 ; RV32IZCMP-SR-NEXT: sw a1, 20(sp)
1042 ; RV32IZCMP-SR-NEXT: addi a1, sp, 24
1043 ; RV32IZCMP-SR-NEXT: sw a1, 12(sp)
1044 ; RV32IZCMP-SR-NEXT: addi sp, sp, 48
1045 ; RV32IZCMP-SR-NEXT: ret
1047 ; RV64IZCMP-SR-LABEL: varargs:
1048 ; RV64IZCMP-SR: # %bb.0:
1049 ; RV64IZCMP-SR-NEXT: addi sp, sp, -80
1050 ; RV64IZCMP-SR-NEXT: sd a1, 24(sp)
1051 ; RV64IZCMP-SR-NEXT: sd a7, 72(sp)
1052 ; RV64IZCMP-SR-NEXT: sd a6, 64(sp)
1053 ; RV64IZCMP-SR-NEXT: addi a0, sp, 28
1054 ; RV64IZCMP-SR-NEXT: sd a0, 8(sp)
1055 ; RV64IZCMP-SR-NEXT: lw a0, 24(sp)
1056 ; RV64IZCMP-SR-NEXT: sd a5, 56(sp)
1057 ; RV64IZCMP-SR-NEXT: sd a4, 48(sp)
1058 ; RV64IZCMP-SR-NEXT: sd a3, 40(sp)
1059 ; RV64IZCMP-SR-NEXT: sd a2, 32(sp)
1060 ; RV64IZCMP-SR-NEXT: addi sp, sp, 80
1061 ; RV64IZCMP-SR-NEXT: ret
1063 ; RV32I-LABEL: varargs:
1065 ; RV32I-NEXT: addi sp, sp, -48
1066 ; RV32I-NEXT: mv a0, a1
1067 ; RV32I-NEXT: sw a7, 44(sp)
1068 ; RV32I-NEXT: sw a6, 40(sp)
1069 ; RV32I-NEXT: sw a5, 36(sp)
1070 ; RV32I-NEXT: sw a4, 32(sp)
1071 ; RV32I-NEXT: sw a3, 28(sp)
1072 ; RV32I-NEXT: sw a2, 24(sp)
1073 ; RV32I-NEXT: sw a1, 20(sp)
1074 ; RV32I-NEXT: addi a1, sp, 24
1075 ; RV32I-NEXT: sw a1, 12(sp)
1076 ; RV32I-NEXT: addi sp, sp, 48
1079 ; RV64I-LABEL: varargs:
1081 ; RV64I-NEXT: addi sp, sp, -80
1082 ; RV64I-NEXT: sd a1, 24(sp)
1083 ; RV64I-NEXT: sd a7, 72(sp)
1084 ; RV64I-NEXT: sd a6, 64(sp)
1085 ; RV64I-NEXT: addi a0, sp, 28
1086 ; RV64I-NEXT: sd a0, 8(sp)
1087 ; RV64I-NEXT: lw a0, 24(sp)
1088 ; RV64I-NEXT: sd a5, 56(sp)
1089 ; RV64I-NEXT: sd a4, 48(sp)
1090 ; RV64I-NEXT: sd a3, 40(sp)
1091 ; RV64I-NEXT: sd a2, 32(sp)
1092 ; RV64I-NEXT: addi sp, sp, 80
%va = alloca ptr
1095 call void @llvm.va_start(ptr %va)
1096 %argp.cur = load ptr, ptr %va
1097 %argp.next = getelementptr inbounds i8, ptr %argp.cur, i32 4
1098 store ptr %argp.next, ptr %va
1099 %1 = load i32, ptr %argp.cur
1100 call void @llvm.va_end(ptr %va)
  ret i32 %1
}
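; Check that a function needing the argument and temporary registers plus
; s0-s4 still uses a single cm.push/cm.popret pair for prologue and epilogue.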
1104 @var0 = global [18 x i32] zeroinitializer
1106 define void @many_args(i32, i32, i32, i32, i32, i32, i32, i32, i32) nounwind {
1107 ; RV32IZCMP-LABEL: many_args:
1108 ; RV32IZCMP: # %bb.0: # %entry
1109 ; RV32IZCMP-NEXT: cm.push {ra, s0-s4}, -32
1110 ; RV32IZCMP-NEXT: lui a0, %hi(var0)
1111 ; RV32IZCMP-NEXT: lw a6, %lo(var0)(a0)
1112 ; RV32IZCMP-NEXT: lw a7, %lo(var0+4)(a0)
1113 ; RV32IZCMP-NEXT: lw t0, %lo(var0+8)(a0)
1114 ; RV32IZCMP-NEXT: lw t1, %lo(var0+12)(a0)
1115 ; RV32IZCMP-NEXT: addi a5, a0, %lo(var0)
1116 ; RV32IZCMP-NEXT: lw t2, 16(a5)
1117 ; RV32IZCMP-NEXT: lw t3, 20(a5)
1118 ; RV32IZCMP-NEXT: lw t4, 24(a5)
1119 ; RV32IZCMP-NEXT: lw t5, 28(a5)
1120 ; RV32IZCMP-NEXT: lw t6, 32(a5)
1121 ; RV32IZCMP-NEXT: lw s2, 36(a5)
1122 ; RV32IZCMP-NEXT: lw s3, 40(a5)
1123 ; RV32IZCMP-NEXT: lw s4, 44(a5)
1124 ; RV32IZCMP-NEXT: lw a1, 48(a5)
1125 ; RV32IZCMP-NEXT: lw s0, 52(a5)
1126 ; RV32IZCMP-NEXT: lw s1, 68(a5)
1127 ; RV32IZCMP-NEXT: lw a2, 64(a5)
1128 ; RV32IZCMP-NEXT: lw a3, 60(a5)
1129 ; RV32IZCMP-NEXT: lw a4, 56(a5)
1130 ; RV32IZCMP-NEXT: sw s1, 68(a5)
1131 ; RV32IZCMP-NEXT: sw a2, 64(a5)
1132 ; RV32IZCMP-NEXT: sw a3, 60(a5)
1133 ; RV32IZCMP-NEXT: sw a4, 56(a5)
1134 ; RV32IZCMP-NEXT: sw s0, 52(a5)
1135 ; RV32IZCMP-NEXT: sw a1, 48(a5)
1136 ; RV32IZCMP-NEXT: sw s4, 44(a5)
1137 ; RV32IZCMP-NEXT: sw s3, 40(a5)
1138 ; RV32IZCMP-NEXT: sw s2, 36(a5)
1139 ; RV32IZCMP-NEXT: sw t6, 32(a5)
1140 ; RV32IZCMP-NEXT: sw t5, 28(a5)
1141 ; RV32IZCMP-NEXT: sw t4, 24(a5)
1142 ; RV32IZCMP-NEXT: sw t3, 20(a5)
1143 ; RV32IZCMP-NEXT: sw t2, 16(a5)
1144 ; RV32IZCMP-NEXT: sw t1, %lo(var0+12)(a0)
1145 ; RV32IZCMP-NEXT: sw t0, %lo(var0+8)(a0)
1146 ; RV32IZCMP-NEXT: sw a7, %lo(var0+4)(a0)
1147 ; RV32IZCMP-NEXT: sw a6, %lo(var0)(a0)
1148 ; RV32IZCMP-NEXT: cm.popret {ra, s0-s4}, 32
1150 ; RV64IZCMP-LABEL: many_args:
1151 ; RV64IZCMP: # %bb.0: # %entry
1152 ; RV64IZCMP-NEXT: cm.push {ra, s0-s4}, -48
1153 ; RV64IZCMP-NEXT: lui a0, %hi(var0)
1154 ; RV64IZCMP-NEXT: lw a6, %lo(var0)(a0)
1155 ; RV64IZCMP-NEXT: lw a7, %lo(var0+4)(a0)
1156 ; RV64IZCMP-NEXT: lw t0, %lo(var0+8)(a0)
1157 ; RV64IZCMP-NEXT: lw t1, %lo(var0+12)(a0)
1158 ; RV64IZCMP-NEXT: addi a5, a0, %lo(var0)
1159 ; RV64IZCMP-NEXT: lw t2, 16(a5)
1160 ; RV64IZCMP-NEXT: lw t3, 20(a5)
1161 ; RV64IZCMP-NEXT: lw t4, 24(a5)
1162 ; RV64IZCMP-NEXT: lw t5, 28(a5)
1163 ; RV64IZCMP-NEXT: lw t6, 32(a5)
1164 ; RV64IZCMP-NEXT: lw s2, 36(a5)
1165 ; RV64IZCMP-NEXT: lw s3, 40(a5)
1166 ; RV64IZCMP-NEXT: lw s4, 44(a5)
1167 ; RV64IZCMP-NEXT: lw a1, 48(a5)
1168 ; RV64IZCMP-NEXT: lw s0, 52(a5)
1169 ; RV64IZCMP-NEXT: lw s1, 68(a5)
1170 ; RV64IZCMP-NEXT: lw a2, 64(a5)
1171 ; RV64IZCMP-NEXT: lw a3, 60(a5)
1172 ; RV64IZCMP-NEXT: lw a4, 56(a5)
1173 ; RV64IZCMP-NEXT: sw s1, 68(a5)
1174 ; RV64IZCMP-NEXT: sw a2, 64(a5)
1175 ; RV64IZCMP-NEXT: sw a3, 60(a5)
1176 ; RV64IZCMP-NEXT: sw a4, 56(a5)
1177 ; RV64IZCMP-NEXT: sw s0, 52(a5)
1178 ; RV64IZCMP-NEXT: sw a1, 48(a5)
1179 ; RV64IZCMP-NEXT: sw s4, 44(a5)
1180 ; RV64IZCMP-NEXT: sw s3, 40(a5)
1181 ; RV64IZCMP-NEXT: sw s2, 36(a5)
1182 ; RV64IZCMP-NEXT: sw t6, 32(a5)
1183 ; RV64IZCMP-NEXT: sw t5, 28(a5)
1184 ; RV64IZCMP-NEXT: sw t4, 24(a5)
1185 ; RV64IZCMP-NEXT: sw t3, 20(a5)
1186 ; RV64IZCMP-NEXT: sw t2, 16(a5)
1187 ; RV64IZCMP-NEXT: sw t1, %lo(var0+12)(a0)
1188 ; RV64IZCMP-NEXT: sw t0, %lo(var0+8)(a0)
1189 ; RV64IZCMP-NEXT: sw a7, %lo(var0+4)(a0)
1190 ; RV64IZCMP-NEXT: sw a6, %lo(var0)(a0)
1191 ; RV64IZCMP-NEXT: cm.popret {ra, s0-s4}, 48
1193 ; RV32IZCMP-SR-LABEL: many_args:
1194 ; RV32IZCMP-SR: # %bb.0: # %entry
1195 ; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s4}, -32
1196 ; RV32IZCMP-SR-NEXT: lui a0, %hi(var0)
1197 ; RV32IZCMP-SR-NEXT: lw a6, %lo(var0)(a0)
1198 ; RV32IZCMP-SR-NEXT: lw a7, %lo(var0+4)(a0)
1199 ; RV32IZCMP-SR-NEXT: lw t0, %lo(var0+8)(a0)
1200 ; RV32IZCMP-SR-NEXT: lw t1, %lo(var0+12)(a0)
1201 ; RV32IZCMP-SR-NEXT: addi a5, a0, %lo(var0)
1202 ; RV32IZCMP-SR-NEXT: lw t2, 16(a5)
1203 ; RV32IZCMP-SR-NEXT: lw t3, 20(a5)
1204 ; RV32IZCMP-SR-NEXT: lw t4, 24(a5)
1205 ; RV32IZCMP-SR-NEXT: lw t5, 28(a5)
1206 ; RV32IZCMP-SR-NEXT: lw t6, 32(a5)
1207 ; RV32IZCMP-SR-NEXT: lw s2, 36(a5)
1208 ; RV32IZCMP-SR-NEXT: lw s3, 40(a5)
1209 ; RV32IZCMP-SR-NEXT: lw s4, 44(a5)
1210 ; RV32IZCMP-SR-NEXT: lw a1, 48(a5)
1211 ; RV32IZCMP-SR-NEXT: lw s0, 52(a5)
1212 ; RV32IZCMP-SR-NEXT: lw s1, 68(a5)
1213 ; RV32IZCMP-SR-NEXT: lw a2, 64(a5)
1214 ; RV32IZCMP-SR-NEXT: lw a3, 60(a5)
1215 ; RV32IZCMP-SR-NEXT: lw a4, 56(a5)
1216 ; RV32IZCMP-SR-NEXT: sw s1, 68(a5)
1217 ; RV32IZCMP-SR-NEXT: sw a2, 64(a5)
1218 ; RV32IZCMP-SR-NEXT: sw a3, 60(a5)
1219 ; RV32IZCMP-SR-NEXT: sw a4, 56(a5)
1220 ; RV32IZCMP-SR-NEXT: sw s0, 52(a5)
1221 ; RV32IZCMP-SR-NEXT: sw a1, 48(a5)
1222 ; RV32IZCMP-SR-NEXT: sw s4, 44(a5)
1223 ; RV32IZCMP-SR-NEXT: sw s3, 40(a5)
1224 ; RV32IZCMP-SR-NEXT: sw s2, 36(a5)
1225 ; RV32IZCMP-SR-NEXT: sw t6, 32(a5)
1226 ; RV32IZCMP-SR-NEXT: sw t5, 28(a5)
1227 ; RV32IZCMP-SR-NEXT: sw t4, 24(a5)
1228 ; RV32IZCMP-SR-NEXT: sw t3, 20(a5)
1229 ; RV32IZCMP-SR-NEXT: sw t2, 16(a5)
1230 ; RV32IZCMP-SR-NEXT: sw t1, %lo(var0+12)(a0)
1231 ; RV32IZCMP-SR-NEXT: sw t0, %lo(var0+8)(a0)
1232 ; RV32IZCMP-SR-NEXT: sw a7, %lo(var0+4)(a0)
1233 ; RV32IZCMP-SR-NEXT: sw a6, %lo(var0)(a0)
1234 ; RV32IZCMP-SR-NEXT: cm.popret {ra, s0-s4}, 32
1236 ; RV64IZCMP-SR-LABEL: many_args:
1237 ; RV64IZCMP-SR: # %bb.0: # %entry
1238 ; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s4}, -48
1239 ; RV64IZCMP-SR-NEXT: lui a0, %hi(var0)
1240 ; RV64IZCMP-SR-NEXT: lw a6, %lo(var0)(a0)
1241 ; RV64IZCMP-SR-NEXT: lw a7, %lo(var0+4)(a0)
1242 ; RV64IZCMP-SR-NEXT: lw t0, %lo(var0+8)(a0)
1243 ; RV64IZCMP-SR-NEXT: lw t1, %lo(var0+12)(a0)
1244 ; RV64IZCMP-SR-NEXT: addi a5, a0, %lo(var0)
1245 ; RV64IZCMP-SR-NEXT: lw t2, 16(a5)
1246 ; RV64IZCMP-SR-NEXT: lw t3, 20(a5)
1247 ; RV64IZCMP-SR-NEXT: lw t4, 24(a5)
1248 ; RV64IZCMP-SR-NEXT: lw t5, 28(a5)
1249 ; RV64IZCMP-SR-NEXT: lw t6, 32(a5)
1250 ; RV64IZCMP-SR-NEXT: lw s2, 36(a5)
1251 ; RV64IZCMP-SR-NEXT: lw s3, 40(a5)
1252 ; RV64IZCMP-SR-NEXT: lw s4, 44(a5)
1253 ; RV64IZCMP-SR-NEXT: lw a1, 48(a5)
1254 ; RV64IZCMP-SR-NEXT: lw s0, 52(a5)
1255 ; RV64IZCMP-SR-NEXT: lw s1, 68(a5)
1256 ; RV64IZCMP-SR-NEXT: lw a2, 64(a5)
1257 ; RV64IZCMP-SR-NEXT: lw a3, 60(a5)
1258 ; RV64IZCMP-SR-NEXT: lw a4, 56(a5)
1259 ; RV64IZCMP-SR-NEXT: sw s1, 68(a5)
1260 ; RV64IZCMP-SR-NEXT: sw a2, 64(a5)
1261 ; RV64IZCMP-SR-NEXT: sw a3, 60(a5)
1262 ; RV64IZCMP-SR-NEXT: sw a4, 56(a5)
1263 ; RV64IZCMP-SR-NEXT: sw s0, 52(a5)
1264 ; RV64IZCMP-SR-NEXT: sw a1, 48(a5)
1265 ; RV64IZCMP-SR-NEXT: sw s4, 44(a5)
1266 ; RV64IZCMP-SR-NEXT: sw s3, 40(a5)
1267 ; RV64IZCMP-SR-NEXT: sw s2, 36(a5)
1268 ; RV64IZCMP-SR-NEXT: sw t6, 32(a5)
1269 ; RV64IZCMP-SR-NEXT: sw t5, 28(a5)
1270 ; RV64IZCMP-SR-NEXT: sw t4, 24(a5)
1271 ; RV64IZCMP-SR-NEXT: sw t3, 20(a5)
1272 ; RV64IZCMP-SR-NEXT: sw t2, 16(a5)
1273 ; RV64IZCMP-SR-NEXT: sw t1, %lo(var0+12)(a0)
1274 ; RV64IZCMP-SR-NEXT: sw t0, %lo(var0+8)(a0)
1275 ; RV64IZCMP-SR-NEXT: sw a7, %lo(var0+4)(a0)
1276 ; RV64IZCMP-SR-NEXT: sw a6, %lo(var0)(a0)
1277 ; RV64IZCMP-SR-NEXT: cm.popret {ra, s0-s4}, 48
1279 ; RV32I-LABEL: many_args:
1280 ; RV32I: # %bb.0: # %entry
1281 ; RV32I-NEXT: addi sp, sp, -32
1282 ; RV32I-NEXT: sw s0, 28(sp) # 4-byte Folded Spill
1283 ; RV32I-NEXT: sw s1, 24(sp) # 4-byte Folded Spill
1284 ; RV32I-NEXT: sw s2, 20(sp) # 4-byte Folded Spill
1285 ; RV32I-NEXT: sw s3, 16(sp) # 4-byte Folded Spill
1286 ; RV32I-NEXT: sw s4, 12(sp) # 4-byte Folded Spill
1287 ; RV32I-NEXT: lui a0, %hi(var0)
1288 ; RV32I-NEXT: lw a1, %lo(var0)(a0)
1289 ; RV32I-NEXT: lw a2, %lo(var0+4)(a0)
1290 ; RV32I-NEXT: lw a3, %lo(var0+8)(a0)
1291 ; RV32I-NEXT: lw a4, %lo(var0+12)(a0)
1292 ; RV32I-NEXT: addi a5, a0, %lo(var0)
1293 ; RV32I-NEXT: lw a6, 16(a5)
1294 ; RV32I-NEXT: lw a7, 20(a5)
1295 ; RV32I-NEXT: lw t0, 24(a5)
1296 ; RV32I-NEXT: lw t1, 28(a5)
1297 ; RV32I-NEXT: lw t2, 32(a5)
1298 ; RV32I-NEXT: lw t3, 36(a5)
1299 ; RV32I-NEXT: lw t4, 40(a5)
1300 ; RV32I-NEXT: lw t5, 44(a5)
1301 ; RV32I-NEXT: lw t6, 48(a5)
1302 ; RV32I-NEXT: lw s0, 52(a5)
1303 ; RV32I-NEXT: lw s1, 68(a5)
1304 ; RV32I-NEXT: lw s2, 64(a5)
1305 ; RV32I-NEXT: lw s3, 60(a5)
1306 ; RV32I-NEXT: lw s4, 56(a5)
1307 ; RV32I-NEXT: sw s1, 68(a5)
1308 ; RV32I-NEXT: sw s2, 64(a5)
1309 ; RV32I-NEXT: sw s3, 60(a5)
1310 ; RV32I-NEXT: sw s4, 56(a5)
1311 ; RV32I-NEXT: sw s0, 52(a5)
1312 ; RV32I-NEXT: sw t6, 48(a5)
1313 ; RV32I-NEXT: sw t5, 44(a5)
1314 ; RV32I-NEXT: sw t4, 40(a5)
1315 ; RV32I-NEXT: sw t3, 36(a5)
1316 ; RV32I-NEXT: sw t2, 32(a5)
1317 ; RV32I-NEXT: sw t1, 28(a5)
1318 ; RV32I-NEXT: sw t0, 24(a5)
1319 ; RV32I-NEXT: sw a7, 20(a5)
1320 ; RV32I-NEXT: sw a6, 16(a5)
1321 ; RV32I-NEXT: sw a4, %lo(var0+12)(a0)
1322 ; RV32I-NEXT: sw a3, %lo(var0+8)(a0)
1323 ; RV32I-NEXT: sw a2, %lo(var0+4)(a0)
1324 ; RV32I-NEXT: sw a1, %lo(var0)(a0)
1325 ; RV32I-NEXT: lw s0, 28(sp) # 4-byte Folded Reload
1326 ; RV32I-NEXT: lw s1, 24(sp) # 4-byte Folded Reload
1327 ; RV32I-NEXT: lw s2, 20(sp) # 4-byte Folded Reload
1328 ; RV32I-NEXT: lw s3, 16(sp) # 4-byte Folded Reload
1329 ; RV32I-NEXT: lw s4, 12(sp) # 4-byte Folded Reload
1330 ; RV32I-NEXT: addi sp, sp, 32
1333 ; RV64I-LABEL: many_args:
1334 ; RV64I: # %bb.0: # %entry
1335 ; RV64I-NEXT: addi sp, sp, -48
1336 ; RV64I-NEXT: sd s0, 40(sp) # 8-byte Folded Spill
1337 ; RV64I-NEXT: sd s1, 32(sp) # 8-byte Folded Spill
1338 ; RV64I-NEXT: sd s2, 24(sp) # 8-byte Folded Spill
1339 ; RV64I-NEXT: sd s3, 16(sp) # 8-byte Folded Spill
1340 ; RV64I-NEXT: sd s4, 8(sp) # 8-byte Folded Spill
1341 ; RV64I-NEXT: lui a0, %hi(var0)
1342 ; RV64I-NEXT: lw a1, %lo(var0)(a0)
1343 ; RV64I-NEXT: lw a2, %lo(var0+4)(a0)
1344 ; RV64I-NEXT: lw a3, %lo(var0+8)(a0)
1345 ; RV64I-NEXT: lw a4, %lo(var0+12)(a0)
1346 ; RV64I-NEXT: addi a5, a0, %lo(var0)
1347 ; RV64I-NEXT: lw a6, 16(a5)
1348 ; RV64I-NEXT: lw a7, 20(a5)
1349 ; RV64I-NEXT: lw t0, 24(a5)
1350 ; RV64I-NEXT: lw t1, 28(a5)
1351 ; RV64I-NEXT: lw t2, 32(a5)
1352 ; RV64I-NEXT: lw t3, 36(a5)
1353 ; RV64I-NEXT: lw t4, 40(a5)
1354 ; RV64I-NEXT: lw t5, 44(a5)
1355 ; RV64I-NEXT: lw t6, 48(a5)
1356 ; RV64I-NEXT: lw s0, 52(a5)
1357 ; RV64I-NEXT: lw s1, 68(a5)
1358 ; RV64I-NEXT: lw s2, 64(a5)
1359 ; RV64I-NEXT: lw s3, 60(a5)
1360 ; RV64I-NEXT: lw s4, 56(a5)
1361 ; RV64I-NEXT: sw s1, 68(a5)
1362 ; RV64I-NEXT: sw s2, 64(a5)
1363 ; RV64I-NEXT: sw s3, 60(a5)
1364 ; RV64I-NEXT: sw s4, 56(a5)
1365 ; RV64I-NEXT: sw s0, 52(a5)
1366 ; RV64I-NEXT: sw t6, 48(a5)
1367 ; RV64I-NEXT: sw t5, 44(a5)
1368 ; RV64I-NEXT: sw t4, 40(a5)
1369 ; RV64I-NEXT: sw t3, 36(a5)
1370 ; RV64I-NEXT: sw t2, 32(a5)
1371 ; RV64I-NEXT: sw t1, 28(a5)
1372 ; RV64I-NEXT: sw t0, 24(a5)
1373 ; RV64I-NEXT: sw a7, 20(a5)
1374 ; RV64I-NEXT: sw a6, 16(a5)
1375 ; RV64I-NEXT: sw a4, %lo(var0+12)(a0)
1376 ; RV64I-NEXT: sw a3, %lo(var0+8)(a0)
1377 ; RV64I-NEXT: sw a2, %lo(var0+4)(a0)
1378 ; RV64I-NEXT: sw a1, %lo(var0)(a0)
1379 ; RV64I-NEXT: ld s0, 40(sp) # 8-byte Folded Reload
1380 ; RV64I-NEXT: ld s1, 32(sp) # 8-byte Folded Reload
1381 ; RV64I-NEXT: ld s2, 24(sp) # 8-byte Folded Reload
1382 ; RV64I-NEXT: ld s3, 16(sp) # 8-byte Folded Reload
1383 ; RV64I-NEXT: ld s4, 8(sp) # 8-byte Folded Reload
1384 ; RV64I-NEXT: addi sp, sp, 48
1387 %val = load [18 x i32], ptr @var0
1388 store volatile [18 x i32] %val, ptr @var0
  ret void
}
1392 ; Check that dynamic allocation calculations remain correct
1394 declare ptr @llvm.stacksave()
1395 declare void @llvm.stackrestore(ptr)
1396 declare void @notdead(ptr)
1398 define void @alloca(i32 %n) nounwind {
1399 ; RV32IZCMP-LABEL: alloca:
1400 ; RV32IZCMP: # %bb.0:
1401 ; RV32IZCMP-NEXT: cm.push {ra, s0-s1}, -16
1402 ; RV32IZCMP-NEXT: addi s0, sp, 16
1403 ; RV32IZCMP-NEXT: mv s1, sp
1404 ; RV32IZCMP-NEXT: addi a0, a0, 15
1405 ; RV32IZCMP-NEXT: andi a0, a0, -16
1406 ; RV32IZCMP-NEXT: sub a0, sp, a0
1407 ; RV32IZCMP-NEXT: mv sp, a0
1408 ; RV32IZCMP-NEXT: call notdead
1409 ; RV32IZCMP-NEXT: mv sp, s1
1410 ; RV32IZCMP-NEXT: addi sp, s0, -16
1411 ; RV32IZCMP-NEXT: cm.popret {ra, s0-s1}, 16
1413 ; RV64IZCMP-LABEL: alloca:
1414 ; RV64IZCMP: # %bb.0:
1415 ; RV64IZCMP-NEXT: cm.push {ra, s0-s1}, -32
1416 ; RV64IZCMP-NEXT: addi s0, sp, 32
1417 ; RV64IZCMP-NEXT: mv s1, sp
1418 ; RV64IZCMP-NEXT: slli a0, a0, 32
1419 ; RV64IZCMP-NEXT: srli a0, a0, 32
1420 ; RV64IZCMP-NEXT: addi a0, a0, 15
1421 ; RV64IZCMP-NEXT: andi a0, a0, -16
1422 ; RV64IZCMP-NEXT: sub a0, sp, a0
1423 ; RV64IZCMP-NEXT: mv sp, a0
1424 ; RV64IZCMP-NEXT: call notdead
1425 ; RV64IZCMP-NEXT: mv sp, s1
1426 ; RV64IZCMP-NEXT: addi sp, s0, -32
1427 ; RV64IZCMP-NEXT: cm.popret {ra, s0-s1}, 32
1429 ; RV32IZCMP-SR-LABEL: alloca:
1430 ; RV32IZCMP-SR: # %bb.0:
1431 ; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s1}, -16
1432 ; RV32IZCMP-SR-NEXT: addi s0, sp, 16
1433 ; RV32IZCMP-SR-NEXT: mv s1, sp
1434 ; RV32IZCMP-SR-NEXT: addi a0, a0, 15
1435 ; RV32IZCMP-SR-NEXT: andi a0, a0, -16
1436 ; RV32IZCMP-SR-NEXT: sub a0, sp, a0
1437 ; RV32IZCMP-SR-NEXT: mv sp, a0
1438 ; RV32IZCMP-SR-NEXT: call notdead
1439 ; RV32IZCMP-SR-NEXT: mv sp, s1
1440 ; RV32IZCMP-SR-NEXT: addi sp, s0, -16
1441 ; RV32IZCMP-SR-NEXT: cm.popret {ra, s0-s1}, 16
1443 ; RV64IZCMP-SR-LABEL: alloca:
1444 ; RV64IZCMP-SR: # %bb.0:
1445 ; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s1}, -32
1446 ; RV64IZCMP-SR-NEXT: addi s0, sp, 32
1447 ; RV64IZCMP-SR-NEXT: mv s1, sp
1448 ; RV64IZCMP-SR-NEXT: slli a0, a0, 32
1449 ; RV64IZCMP-SR-NEXT: srli a0, a0, 32
1450 ; RV64IZCMP-SR-NEXT: addi a0, a0, 15
1451 ; RV64IZCMP-SR-NEXT: andi a0, a0, -16
1452 ; RV64IZCMP-SR-NEXT: sub a0, sp, a0
1453 ; RV64IZCMP-SR-NEXT: mv sp, a0
1454 ; RV64IZCMP-SR-NEXT: call notdead
1455 ; RV64IZCMP-SR-NEXT: mv sp, s1
1456 ; RV64IZCMP-SR-NEXT: addi sp, s0, -32
1457 ; RV64IZCMP-SR-NEXT: cm.popret {ra, s0-s1}, 32
1459 ; RV32I-LABEL: alloca:
1461 ; RV32I-NEXT: addi sp, sp, -16
1462 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1463 ; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
1464 ; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
1465 ; RV32I-NEXT: addi s0, sp, 16
1466 ; RV32I-NEXT: mv s1, sp
1467 ; RV32I-NEXT: addi a0, a0, 15
1468 ; RV32I-NEXT: andi a0, a0, -16
1469 ; RV32I-NEXT: sub a0, sp, a0
1470 ; RV32I-NEXT: mv sp, a0
1471 ; RV32I-NEXT: call notdead
1472 ; RV32I-NEXT: mv sp, s1
1473 ; RV32I-NEXT: addi sp, s0, -16
1474 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1475 ; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
1476 ; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
1477 ; RV32I-NEXT: addi sp, sp, 16
1480 ; RV64I-LABEL: alloca:
1482 ; RV64I-NEXT: addi sp, sp, -32
1483 ; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
1484 ; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
1485 ; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
1486 ; RV64I-NEXT: addi s0, sp, 32
1487 ; RV64I-NEXT: mv s1, sp
1488 ; RV64I-NEXT: slli a0, a0, 32
1489 ; RV64I-NEXT: srli a0, a0, 32
1490 ; RV64I-NEXT: addi a0, a0, 15
1491 ; RV64I-NEXT: andi a0, a0, -16
1492 ; RV64I-NEXT: sub a0, sp, a0
1493 ; RV64I-NEXT: mv sp, a0
1494 ; RV64I-NEXT: call notdead
1495 ; RV64I-NEXT: mv sp, s1
1496 ; RV64I-NEXT: addi sp, s0, -32
1497 ; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
1498 ; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
1499 ; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
1500 ; RV64I-NEXT: addi sp, sp, 32
1502 %sp = call ptr @llvm.stacksave()
1503 %addr = alloca i8, i32 %n
1504 call void @notdead(ptr %addr)
1505 call void @llvm.stackrestore(ptr %sp)
  ret void
}
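; Check that an interrupt handler spills the caller-saved registers around the
; call in addition to the cm.push frame, and returns with mret after cm.pop.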
1509 declare i32 @foo_test_irq(...)
1510 @var_test_irq = global [32 x i32] zeroinitializer
1512 define void @foo_with_irq() nounwind "interrupt"="user" {
1513 ; RV32IZCMP-LABEL: foo_with_irq:
1514 ; RV32IZCMP: # %bb.0:
1515 ; RV32IZCMP-NEXT: cm.push {ra}, -64
1516 ; RV32IZCMP-NEXT: addi sp, sp, -16
1517 ; RV32IZCMP-NEXT: sw t0, 60(sp) # 4-byte Folded Spill
1518 ; RV32IZCMP-NEXT: sw t1, 56(sp) # 4-byte Folded Spill
1519 ; RV32IZCMP-NEXT: sw t2, 52(sp) # 4-byte Folded Spill
1520 ; RV32IZCMP-NEXT: sw a0, 48(sp) # 4-byte Folded Spill
1521 ; RV32IZCMP-NEXT: sw a1, 44(sp) # 4-byte Folded Spill
1522 ; RV32IZCMP-NEXT: sw a2, 40(sp) # 4-byte Folded Spill
1523 ; RV32IZCMP-NEXT: sw a3, 36(sp) # 4-byte Folded Spill
1524 ; RV32IZCMP-NEXT: sw a4, 32(sp) # 4-byte Folded Spill
1525 ; RV32IZCMP-NEXT: sw a5, 28(sp) # 4-byte Folded Spill
1526 ; RV32IZCMP-NEXT: sw a6, 24(sp) # 4-byte Folded Spill
1527 ; RV32IZCMP-NEXT: sw a7, 20(sp) # 4-byte Folded Spill
1528 ; RV32IZCMP-NEXT: sw t3, 16(sp) # 4-byte Folded Spill
1529 ; RV32IZCMP-NEXT: sw t4, 12(sp) # 4-byte Folded Spill
1530 ; RV32IZCMP-NEXT: sw t5, 8(sp) # 4-byte Folded Spill
1531 ; RV32IZCMP-NEXT: sw t6, 4(sp) # 4-byte Folded Spill
1532 ; RV32IZCMP-NEXT: call foo_test_irq
1533 ; RV32IZCMP-NEXT: lw t0, 60(sp) # 4-byte Folded Reload
1534 ; RV32IZCMP-NEXT: lw t1, 56(sp) # 4-byte Folded Reload
1535 ; RV32IZCMP-NEXT: lw t2, 52(sp) # 4-byte Folded Reload
1536 ; RV32IZCMP-NEXT: lw a0, 48(sp) # 4-byte Folded Reload
1537 ; RV32IZCMP-NEXT: lw a1, 44(sp) # 4-byte Folded Reload
1538 ; RV32IZCMP-NEXT: lw a2, 40(sp) # 4-byte Folded Reload
1539 ; RV32IZCMP-NEXT: lw a3, 36(sp) # 4-byte Folded Reload
1540 ; RV32IZCMP-NEXT: lw a4, 32(sp) # 4-byte Folded Reload
1541 ; RV32IZCMP-NEXT: lw a5, 28(sp) # 4-byte Folded Reload
1542 ; RV32IZCMP-NEXT: lw a6, 24(sp) # 4-byte Folded Reload
1543 ; RV32IZCMP-NEXT: lw a7, 20(sp) # 4-byte Folded Reload
1544 ; RV32IZCMP-NEXT: lw t3, 16(sp) # 4-byte Folded Reload
1545 ; RV32IZCMP-NEXT: lw t4, 12(sp) # 4-byte Folded Reload
1546 ; RV32IZCMP-NEXT: lw t5, 8(sp) # 4-byte Folded Reload
1547 ; RV32IZCMP-NEXT: lw t6, 4(sp) # 4-byte Folded Reload
1548 ; RV32IZCMP-NEXT: addi sp, sp, 16
1549 ; RV32IZCMP-NEXT: cm.pop {ra}, 64
1550 ; RV32IZCMP-NEXT: mret
1552 ; RV64IZCMP-LABEL: foo_with_irq:
1553 ; RV64IZCMP: # %bb.0:
1554 ; RV64IZCMP-NEXT: cm.push {ra}, -64
1555 ; RV64IZCMP-NEXT: addi sp, sp, -80
1556 ; RV64IZCMP-NEXT: sd t0, 120(sp) # 8-byte Folded Spill
1557 ; RV64IZCMP-NEXT: sd t1, 112(sp) # 8-byte Folded Spill
1558 ; RV64IZCMP-NEXT: sd t2, 104(sp) # 8-byte Folded Spill
1559 ; RV64IZCMP-NEXT: sd a0, 96(sp) # 8-byte Folded Spill
1560 ; RV64IZCMP-NEXT: sd a1, 88(sp) # 8-byte Folded Spill
1561 ; RV64IZCMP-NEXT: sd a2, 80(sp) # 8-byte Folded Spill
1562 ; RV64IZCMP-NEXT: sd a3, 72(sp) # 8-byte Folded Spill
1563 ; RV64IZCMP-NEXT: sd a4, 64(sp) # 8-byte Folded Spill
1564 ; RV64IZCMP-NEXT: sd a5, 56(sp) # 8-byte Folded Spill
1565 ; RV64IZCMP-NEXT: sd a6, 48(sp) # 8-byte Folded Spill
1566 ; RV64IZCMP-NEXT: sd a7, 40(sp) # 8-byte Folded Spill
1567 ; RV64IZCMP-NEXT: sd t3, 32(sp) # 8-byte Folded Spill
1568 ; RV64IZCMP-NEXT: sd t4, 24(sp) # 8-byte Folded Spill
1569 ; RV64IZCMP-NEXT: sd t5, 16(sp) # 8-byte Folded Spill
1570 ; RV64IZCMP-NEXT: sd t6, 8(sp) # 8-byte Folded Spill
1571 ; RV64IZCMP-NEXT: call foo_test_irq
1572 ; RV64IZCMP-NEXT: ld t0, 120(sp) # 8-byte Folded Reload
1573 ; RV64IZCMP-NEXT: ld t1, 112(sp) # 8-byte Folded Reload
1574 ; RV64IZCMP-NEXT: ld t2, 104(sp) # 8-byte Folded Reload
1575 ; RV64IZCMP-NEXT: ld a0, 96(sp) # 8-byte Folded Reload
1576 ; RV64IZCMP-NEXT: ld a1, 88(sp) # 8-byte Folded Reload
1577 ; RV64IZCMP-NEXT: ld a2, 80(sp) # 8-byte Folded Reload
1578 ; RV64IZCMP-NEXT: ld a3, 72(sp) # 8-byte Folded Reload
1579 ; RV64IZCMP-NEXT: ld a4, 64(sp) # 8-byte Folded Reload
1580 ; RV64IZCMP-NEXT: ld a5, 56(sp) # 8-byte Folded Reload
1581 ; RV64IZCMP-NEXT: ld a6, 48(sp) # 8-byte Folded Reload
1582 ; RV64IZCMP-NEXT: ld a7, 40(sp) # 8-byte Folded Reload
1583 ; RV64IZCMP-NEXT: ld t3, 32(sp) # 8-byte Folded Reload
1584 ; RV64IZCMP-NEXT: ld t4, 24(sp) # 8-byte Folded Reload
1585 ; RV64IZCMP-NEXT: ld t5, 16(sp) # 8-byte Folded Reload
1586 ; RV64IZCMP-NEXT: ld t6, 8(sp) # 8-byte Folded Reload
1587 ; RV64IZCMP-NEXT: addi sp, sp, 80
1588 ; RV64IZCMP-NEXT: cm.pop {ra}, 64
1589 ; RV64IZCMP-NEXT: mret
1591 ; RV32IZCMP-SR-LABEL: foo_with_irq:
1592 ; RV32IZCMP-SR: # %bb.0:
1593 ; RV32IZCMP-SR-NEXT: cm.push {ra}, -64
1594 ; RV32IZCMP-SR-NEXT: addi sp, sp, -16
1595 ; RV32IZCMP-SR-NEXT: sw t0, 60(sp) # 4-byte Folded Spill
1596 ; RV32IZCMP-SR-NEXT: sw t1, 56(sp) # 4-byte Folded Spill
1597 ; RV32IZCMP-SR-NEXT: sw t2, 52(sp) # 4-byte Folded Spill
1598 ; RV32IZCMP-SR-NEXT: sw a0, 48(sp) # 4-byte Folded Spill
1599 ; RV32IZCMP-SR-NEXT: sw a1, 44(sp) # 4-byte Folded Spill
1600 ; RV32IZCMP-SR-NEXT: sw a2, 40(sp) # 4-byte Folded Spill
1601 ; RV32IZCMP-SR-NEXT: sw a3, 36(sp) # 4-byte Folded Spill
1602 ; RV32IZCMP-SR-NEXT: sw a4, 32(sp) # 4-byte Folded Spill
1603 ; RV32IZCMP-SR-NEXT: sw a5, 28(sp) # 4-byte Folded Spill
1604 ; RV32IZCMP-SR-NEXT: sw a6, 24(sp) # 4-byte Folded Spill
1605 ; RV32IZCMP-SR-NEXT: sw a7, 20(sp) # 4-byte Folded Spill
1606 ; RV32IZCMP-SR-NEXT: sw t3, 16(sp) # 4-byte Folded Spill
1607 ; RV32IZCMP-SR-NEXT: sw t4, 12(sp) # 4-byte Folded Spill
1608 ; RV32IZCMP-SR-NEXT: sw t5, 8(sp) # 4-byte Folded Spill
1609 ; RV32IZCMP-SR-NEXT: sw t6, 4(sp) # 4-byte Folded Spill
1610 ; RV32IZCMP-SR-NEXT: call foo_test_irq
1611 ; RV32IZCMP-SR-NEXT: lw t0, 60(sp) # 4-byte Folded Reload
1612 ; RV32IZCMP-SR-NEXT: lw t1, 56(sp) # 4-byte Folded Reload
1613 ; RV32IZCMP-SR-NEXT: lw t2, 52(sp) # 4-byte Folded Reload
1614 ; RV32IZCMP-SR-NEXT: lw a0, 48(sp) # 4-byte Folded Reload
1615 ; RV32IZCMP-SR-NEXT: lw a1, 44(sp) # 4-byte Folded Reload
1616 ; RV32IZCMP-SR-NEXT: lw a2, 40(sp) # 4-byte Folded Reload
1617 ; RV32IZCMP-SR-NEXT: lw a3, 36(sp) # 4-byte Folded Reload
1618 ; RV32IZCMP-SR-NEXT: lw a4, 32(sp) # 4-byte Folded Reload
1619 ; RV32IZCMP-SR-NEXT: lw a5, 28(sp) # 4-byte Folded Reload
1620 ; RV32IZCMP-SR-NEXT: lw a6, 24(sp) # 4-byte Folded Reload
1621 ; RV32IZCMP-SR-NEXT: lw a7, 20(sp) # 4-byte Folded Reload
1622 ; RV32IZCMP-SR-NEXT: lw t3, 16(sp) # 4-byte Folded Reload
1623 ; RV32IZCMP-SR-NEXT: lw t4, 12(sp) # 4-byte Folded Reload
1624 ; RV32IZCMP-SR-NEXT: lw t5, 8(sp) # 4-byte Folded Reload
1625 ; RV32IZCMP-SR-NEXT: lw t6, 4(sp) # 4-byte Folded Reload
1626 ; RV32IZCMP-SR-NEXT: addi sp, sp, 16
1627 ; RV32IZCMP-SR-NEXT: cm.pop {ra}, 64
1628 ; RV32IZCMP-SR-NEXT: mret
1630 ; RV64IZCMP-SR-LABEL: foo_with_irq:
1631 ; RV64IZCMP-SR: # %bb.0:
1632 ; RV64IZCMP-SR-NEXT: cm.push {ra}, -64
1633 ; RV64IZCMP-SR-NEXT: addi sp, sp, -80
1634 ; RV64IZCMP-SR-NEXT: sd t0, 120(sp) # 8-byte Folded Spill
1635 ; RV64IZCMP-SR-NEXT: sd t1, 112(sp) # 8-byte Folded Spill
1636 ; RV64IZCMP-SR-NEXT: sd t2, 104(sp) # 8-byte Folded Spill
1637 ; RV64IZCMP-SR-NEXT: sd a0, 96(sp) # 8-byte Folded Spill
1638 ; RV64IZCMP-SR-NEXT: sd a1, 88(sp) # 8-byte Folded Spill
1639 ; RV64IZCMP-SR-NEXT: sd a2, 80(sp) # 8-byte Folded Spill
1640 ; RV64IZCMP-SR-NEXT: sd a3, 72(sp) # 8-byte Folded Spill
1641 ; RV64IZCMP-SR-NEXT: sd a4, 64(sp) # 8-byte Folded Spill
1642 ; RV64IZCMP-SR-NEXT: sd a5, 56(sp) # 8-byte Folded Spill
1643 ; RV64IZCMP-SR-NEXT: sd a6, 48(sp) # 8-byte Folded Spill
1644 ; RV64IZCMP-SR-NEXT: sd a7, 40(sp) # 8-byte Folded Spill
1645 ; RV64IZCMP-SR-NEXT: sd t3, 32(sp) # 8-byte Folded Spill
1646 ; RV64IZCMP-SR-NEXT: sd t4, 24(sp) # 8-byte Folded Spill
1647 ; RV64IZCMP-SR-NEXT: sd t5, 16(sp) # 8-byte Folded Spill
1648 ; RV64IZCMP-SR-NEXT: sd t6, 8(sp) # 8-byte Folded Spill
1649 ; RV64IZCMP-SR-NEXT: call foo_test_irq
1650 ; RV64IZCMP-SR-NEXT: ld t0, 120(sp) # 8-byte Folded Reload
1651 ; RV64IZCMP-SR-NEXT: ld t1, 112(sp) # 8-byte Folded Reload
1652 ; RV64IZCMP-SR-NEXT: ld t2, 104(sp) # 8-byte Folded Reload
1653 ; RV64IZCMP-SR-NEXT: ld a0, 96(sp) # 8-byte Folded Reload
1654 ; RV64IZCMP-SR-NEXT: ld a1, 88(sp) # 8-byte Folded Reload
1655 ; RV64IZCMP-SR-NEXT: ld a2, 80(sp) # 8-byte Folded Reload
1656 ; RV64IZCMP-SR-NEXT: ld a3, 72(sp) # 8-byte Folded Reload
1657 ; RV64IZCMP-SR-NEXT: ld a4, 64(sp) # 8-byte Folded Reload
1658 ; RV64IZCMP-SR-NEXT: ld a5, 56(sp) # 8-byte Folded Reload
1659 ; RV64IZCMP-SR-NEXT: ld a6, 48(sp) # 8-byte Folded Reload
1660 ; RV64IZCMP-SR-NEXT: ld a7, 40(sp) # 8-byte Folded Reload
1661 ; RV64IZCMP-SR-NEXT: ld t3, 32(sp) # 8-byte Folded Reload
1662 ; RV64IZCMP-SR-NEXT: ld t4, 24(sp) # 8-byte Folded Reload
1663 ; RV64IZCMP-SR-NEXT: ld t5, 16(sp) # 8-byte Folded Reload
1664 ; RV64IZCMP-SR-NEXT: ld t6, 8(sp) # 8-byte Folded Reload
1665 ; RV64IZCMP-SR-NEXT: addi sp, sp, 80
1666 ; RV64IZCMP-SR-NEXT: cm.pop {ra}, 64
1667 ; RV64IZCMP-SR-NEXT: mret
1669 ; RV32I-LABEL: foo_with_irq:
1670 ; RV32I: # %bb.0:
1671 ; RV32I-NEXT: addi sp, sp, -64
1672 ; RV32I-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
1673 ; RV32I-NEXT: sw t0, 56(sp) # 4-byte Folded Spill
1674 ; RV32I-NEXT: sw t1, 52(sp) # 4-byte Folded Spill
1675 ; RV32I-NEXT: sw t2, 48(sp) # 4-byte Folded Spill
1676 ; RV32I-NEXT: sw a0, 44(sp) # 4-byte Folded Spill
1677 ; RV32I-NEXT: sw a1, 40(sp) # 4-byte Folded Spill
1678 ; RV32I-NEXT: sw a2, 36(sp) # 4-byte Folded Spill
1679 ; RV32I-NEXT: sw a3, 32(sp) # 4-byte Folded Spill
1680 ; RV32I-NEXT: sw a4, 28(sp) # 4-byte Folded Spill
1681 ; RV32I-NEXT: sw a5, 24(sp) # 4-byte Folded Spill
1682 ; RV32I-NEXT: sw a6, 20(sp) # 4-byte Folded Spill
1683 ; RV32I-NEXT: sw a7, 16(sp) # 4-byte Folded Spill
1684 ; RV32I-NEXT: sw t3, 12(sp) # 4-byte Folded Spill
1685 ; RV32I-NEXT: sw t4, 8(sp) # 4-byte Folded Spill
1686 ; RV32I-NEXT: sw t5, 4(sp) # 4-byte Folded Spill
1687 ; RV32I-NEXT: sw t6, 0(sp) # 4-byte Folded Spill
1688 ; RV32I-NEXT: call foo_test_irq
1689 ; RV32I-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
1690 ; RV32I-NEXT: lw t0, 56(sp) # 4-byte Folded Reload
1691 ; RV32I-NEXT: lw t1, 52(sp) # 4-byte Folded Reload
1692 ; RV32I-NEXT: lw t2, 48(sp) # 4-byte Folded Reload
1693 ; RV32I-NEXT: lw a0, 44(sp) # 4-byte Folded Reload
1694 ; RV32I-NEXT: lw a1, 40(sp) # 4-byte Folded Reload
1695 ; RV32I-NEXT: lw a2, 36(sp) # 4-byte Folded Reload
1696 ; RV32I-NEXT: lw a3, 32(sp) # 4-byte Folded Reload
1697 ; RV32I-NEXT: lw a4, 28(sp) # 4-byte Folded Reload
1698 ; RV32I-NEXT: lw a5, 24(sp) # 4-byte Folded Reload
1699 ; RV32I-NEXT: lw a6, 20(sp) # 4-byte Folded Reload
1700 ; RV32I-NEXT: lw a7, 16(sp) # 4-byte Folded Reload
1701 ; RV32I-NEXT: lw t3, 12(sp) # 4-byte Folded Reload
1702 ; RV32I-NEXT: lw t4, 8(sp) # 4-byte Folded Reload
1703 ; RV32I-NEXT: lw t5, 4(sp) # 4-byte Folded Reload
1704 ; RV32I-NEXT: lw t6, 0(sp) # 4-byte Folded Reload
1705 ; RV32I-NEXT: addi sp, sp, 64
1706 ; RV32I-NEXT: mret
1708 ; RV64I-LABEL: foo_with_irq:
1709 ; RV64I: # %bb.0:
1710 ; RV64I-NEXT: addi sp, sp, -128
1711 ; RV64I-NEXT: sd ra, 120(sp) # 8-byte Folded Spill
1712 ; RV64I-NEXT: sd t0, 112(sp) # 8-byte Folded Spill
1713 ; RV64I-NEXT: sd t1, 104(sp) # 8-byte Folded Spill
1714 ; RV64I-NEXT: sd t2, 96(sp) # 8-byte Folded Spill
1715 ; RV64I-NEXT: sd a0, 88(sp) # 8-byte Folded Spill
1716 ; RV64I-NEXT: sd a1, 80(sp) # 8-byte Folded Spill
1717 ; RV64I-NEXT: sd a2, 72(sp) # 8-byte Folded Spill
1718 ; RV64I-NEXT: sd a3, 64(sp) # 8-byte Folded Spill
1719 ; RV64I-NEXT: sd a4, 56(sp) # 8-byte Folded Spill
1720 ; RV64I-NEXT: sd a5, 48(sp) # 8-byte Folded Spill
1721 ; RV64I-NEXT: sd a6, 40(sp) # 8-byte Folded Spill
1722 ; RV64I-NEXT: sd a7, 32(sp) # 8-byte Folded Spill
1723 ; RV64I-NEXT: sd t3, 24(sp) # 8-byte Folded Spill
1724 ; RV64I-NEXT: sd t4, 16(sp) # 8-byte Folded Spill
1725 ; RV64I-NEXT: sd t5, 8(sp) # 8-byte Folded Spill
1726 ; RV64I-NEXT: sd t6, 0(sp) # 8-byte Folded Spill
1727 ; RV64I-NEXT: call foo_test_irq
1728 ; RV64I-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
1729 ; RV64I-NEXT: ld t0, 112(sp) # 8-byte Folded Reload
1730 ; RV64I-NEXT: ld t1, 104(sp) # 8-byte Folded Reload
1731 ; RV64I-NEXT: ld t2, 96(sp) # 8-byte Folded Reload
1732 ; RV64I-NEXT: ld a0, 88(sp) # 8-byte Folded Reload
1733 ; RV64I-NEXT: ld a1, 80(sp) # 8-byte Folded Reload
1734 ; RV64I-NEXT: ld a2, 72(sp) # 8-byte Folded Reload
1735 ; RV64I-NEXT: ld a3, 64(sp) # 8-byte Folded Reload
1736 ; RV64I-NEXT: ld a4, 56(sp) # 8-byte Folded Reload
1737 ; RV64I-NEXT: ld a5, 48(sp) # 8-byte Folded Reload
1738 ; RV64I-NEXT: ld a6, 40(sp) # 8-byte Folded Reload
1739 ; RV64I-NEXT: ld a7, 32(sp) # 8-byte Folded Reload
1740 ; RV64I-NEXT: ld t3, 24(sp) # 8-byte Folded Reload
1741 ; RV64I-NEXT: ld t4, 16(sp) # 8-byte Folded Reload
1742 ; RV64I-NEXT: ld t5, 8(sp) # 8-byte Folded Reload
1743 ; RV64I-NEXT: ld t6, 0(sp) # 8-byte Folded Reload
1744 ; RV64I-NEXT: addi sp, sp, 128
1745 ; RV64I-NEXT: mret
1746 %call = call i32 bitcast (i32 (...)* @foo_test_irq to i32 ()*)()
1747 ret void
1748 }
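; Without the interrupt attribute only ra is clobbered by the call, so a
; single cm.push/cm.popret pair covers the whole prologue and epilogue and
; no extra spill slots are needed.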
1750 define void @foo_no_irq() nounwind{
1751 ; RV32IZCMP-LABEL: foo_no_irq:
1752 ; RV32IZCMP: # %bb.0:
1753 ; RV32IZCMP-NEXT: cm.push {ra}, -16
1754 ; RV32IZCMP-NEXT: call foo_test_irq
1755 ; RV32IZCMP-NEXT: cm.popret {ra}, 16
1757 ; RV64IZCMP-LABEL: foo_no_irq:
1758 ; RV64IZCMP: # %bb.0:
1759 ; RV64IZCMP-NEXT: cm.push {ra}, -16
1760 ; RV64IZCMP-NEXT: call foo_test_irq
1761 ; RV64IZCMP-NEXT: cm.popret {ra}, 16
1763 ; RV32IZCMP-SR-LABEL: foo_no_irq:
1764 ; RV32IZCMP-SR: # %bb.0:
1765 ; RV32IZCMP-SR-NEXT: cm.push {ra}, -16
1766 ; RV32IZCMP-SR-NEXT: call foo_test_irq
1767 ; RV32IZCMP-SR-NEXT: cm.popret {ra}, 16
1769 ; RV64IZCMP-SR-LABEL: foo_no_irq:
1770 ; RV64IZCMP-SR: # %bb.0:
1771 ; RV64IZCMP-SR-NEXT: cm.push {ra}, -16
1772 ; RV64IZCMP-SR-NEXT: call foo_test_irq
1773 ; RV64IZCMP-SR-NEXT: cm.popret {ra}, 16
1775 ; RV32I-LABEL: foo_no_irq:
1776 ; RV32I: # %bb.0:
1777 ; RV32I-NEXT: addi sp, sp, -16
1778 ; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
1779 ; RV32I-NEXT: call foo_test_irq
1780 ; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
1781 ; RV32I-NEXT: addi sp, sp, 16
1782 ; RV32I-NEXT: ret
1784 ; RV64I-LABEL: foo_no_irq:
1785 ; RV64I: # %bb.0:
1786 ; RV64I-NEXT: addi sp, sp, -16
1787 ; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
1788 ; RV64I-NEXT: call foo_test_irq
1789 ; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
1790 ; RV64I-NEXT: addi sp, sp, 16
1791 ; RV64I-NEXT: ret
1792 %call = call i32 bitcast (i32 (...)* @foo_test_irq to i32 ()*)()
1793 ret void
1794 }
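; callee_with_irq touches every register through @var_test_irq, so the
; interrupt attribute forces spills of the caller-saved registers on top of
; the cm.push of {ra, s0-s11}, which requires an extra sp adjustment for
; the additional slots.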
1796 define void @callee_with_irq() nounwind "interrupt"="user" {
1797 ; RV32IZCMP-LABEL: callee_with_irq:
1798 ; RV32IZCMP: # %bb.0:
1799 ; RV32IZCMP-NEXT: cm.push {ra, s0-s11}, -112
1800 ; RV32IZCMP-NEXT: addi sp, sp, -48
1801 ; RV32IZCMP-NEXT: sw t0, 92(sp) # 4-byte Folded Spill
1802 ; RV32IZCMP-NEXT: sw t1, 88(sp) # 4-byte Folded Spill
1803 ; RV32IZCMP-NEXT: sw t2, 84(sp) # 4-byte Folded Spill
1804 ; RV32IZCMP-NEXT: sw a0, 80(sp) # 4-byte Folded Spill
1805 ; RV32IZCMP-NEXT: sw a1, 76(sp) # 4-byte Folded Spill
1806 ; RV32IZCMP-NEXT: sw a2, 72(sp) # 4-byte Folded Spill
1807 ; RV32IZCMP-NEXT: sw a3, 68(sp) # 4-byte Folded Spill
1808 ; RV32IZCMP-NEXT: sw a4, 64(sp) # 4-byte Folded Spill
1809 ; RV32IZCMP-NEXT: sw a5, 60(sp) # 4-byte Folded Spill
1810 ; RV32IZCMP-NEXT: sw a6, 56(sp) # 4-byte Folded Spill
1811 ; RV32IZCMP-NEXT: sw a7, 52(sp) # 4-byte Folded Spill
1812 ; RV32IZCMP-NEXT: sw t3, 48(sp) # 4-byte Folded Spill
1813 ; RV32IZCMP-NEXT: sw t4, 44(sp) # 4-byte Folded Spill
1814 ; RV32IZCMP-NEXT: sw t5, 40(sp) # 4-byte Folded Spill
1815 ; RV32IZCMP-NEXT: sw t6, 36(sp) # 4-byte Folded Spill
1816 ; RV32IZCMP-NEXT: lui a6, %hi(var_test_irq)
1817 ; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq)(a6)
1818 ; RV32IZCMP-NEXT: sw a0, 32(sp) # 4-byte Folded Spill
1819 ; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(a6)
1820 ; RV32IZCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
1821 ; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(a6)
1822 ; RV32IZCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
1823 ; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(a6)
1824 ; RV32IZCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
1825 ; RV32IZCMP-NEXT: addi a5, a6, %lo(var_test_irq)
1826 ; RV32IZCMP-NEXT: lw a0, 16(a5)
1827 ; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
1828 ; RV32IZCMP-NEXT: lw a0, 20(a5)
1829 ; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
1830 ; RV32IZCMP-NEXT: lw t4, 24(a5)
1831 ; RV32IZCMP-NEXT: lw t5, 28(a5)
1832 ; RV32IZCMP-NEXT: lw t6, 32(a5)
1833 ; RV32IZCMP-NEXT: lw s2, 36(a5)
1834 ; RV32IZCMP-NEXT: lw s3, 40(a5)
1835 ; RV32IZCMP-NEXT: lw s4, 44(a5)
1836 ; RV32IZCMP-NEXT: lw s5, 48(a5)
1837 ; RV32IZCMP-NEXT: lw s6, 52(a5)
1838 ; RV32IZCMP-NEXT: lw s7, 56(a5)
1839 ; RV32IZCMP-NEXT: lw s8, 60(a5)
1840 ; RV32IZCMP-NEXT: lw s9, 64(a5)
1841 ; RV32IZCMP-NEXT: lw s10, 68(a5)
1842 ; RV32IZCMP-NEXT: lw s11, 72(a5)
1843 ; RV32IZCMP-NEXT: lw ra, 76(a5)
1844 ; RV32IZCMP-NEXT: lw s1, 80(a5)
1845 ; RV32IZCMP-NEXT: lw t3, 84(a5)
1846 ; RV32IZCMP-NEXT: lw t2, 88(a5)
1847 ; RV32IZCMP-NEXT: lw t1, 92(a5)
1848 ; RV32IZCMP-NEXT: lw t0, 96(a5)
1849 ; RV32IZCMP-NEXT: lw s0, 100(a5)
1850 ; RV32IZCMP-NEXT: lw a7, 104(a5)
1851 ; RV32IZCMP-NEXT: lw a4, 108(a5)
1852 ; RV32IZCMP-NEXT: lw a0, 124(a5)
1853 ; RV32IZCMP-NEXT: lw a1, 120(a5)
1854 ; RV32IZCMP-NEXT: lw a2, 116(a5)
1855 ; RV32IZCMP-NEXT: lw a3, 112(a5)
1856 ; RV32IZCMP-NEXT: sw a0, 124(a5)
1857 ; RV32IZCMP-NEXT: sw a1, 120(a5)
1858 ; RV32IZCMP-NEXT: sw a2, 116(a5)
1859 ; RV32IZCMP-NEXT: sw a3, 112(a5)
1860 ; RV32IZCMP-NEXT: sw a4, 108(a5)
1861 ; RV32IZCMP-NEXT: sw a7, 104(a5)
1862 ; RV32IZCMP-NEXT: sw s0, 100(a5)
1863 ; RV32IZCMP-NEXT: sw t0, 96(a5)
1864 ; RV32IZCMP-NEXT: sw t1, 92(a5)
1865 ; RV32IZCMP-NEXT: sw t2, 88(a5)
1866 ; RV32IZCMP-NEXT: sw t3, 84(a5)
1867 ; RV32IZCMP-NEXT: sw s1, 80(a5)
1868 ; RV32IZCMP-NEXT: sw ra, 76(a5)
1869 ; RV32IZCMP-NEXT: sw s11, 72(a5)
1870 ; RV32IZCMP-NEXT: sw s10, 68(a5)
1871 ; RV32IZCMP-NEXT: sw s9, 64(a5)
1872 ; RV32IZCMP-NEXT: sw s8, 60(a5)
1873 ; RV32IZCMP-NEXT: sw s7, 56(a5)
1874 ; RV32IZCMP-NEXT: sw s6, 52(a5)
1875 ; RV32IZCMP-NEXT: sw s5, 48(a5)
1876 ; RV32IZCMP-NEXT: sw s4, 44(a5)
1877 ; RV32IZCMP-NEXT: sw s3, 40(a5)
1878 ; RV32IZCMP-NEXT: sw s2, 36(a5)
1879 ; RV32IZCMP-NEXT: sw t6, 32(a5)
1880 ; RV32IZCMP-NEXT: sw t5, 28(a5)
1881 ; RV32IZCMP-NEXT: sw t4, 24(a5)
1882 ; RV32IZCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
1883 ; RV32IZCMP-NEXT: sw a0, 20(a5)
1884 ; RV32IZCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
1885 ; RV32IZCMP-NEXT: sw a0, 16(a5)
1886 ; RV32IZCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
1887 ; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(a6)
1888 ; RV32IZCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
1889 ; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(a6)
1890 ; RV32IZCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
1891 ; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(a6)
1892 ; RV32IZCMP-NEXT: lw a0, 32(sp) # 4-byte Folded Reload
1893 ; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq)(a6)
1894 ; RV32IZCMP-NEXT: lw t0, 92(sp) # 4-byte Folded Reload
1895 ; RV32IZCMP-NEXT: lw t1, 88(sp) # 4-byte Folded Reload
1896 ; RV32IZCMP-NEXT: lw t2, 84(sp) # 4-byte Folded Reload
1897 ; RV32IZCMP-NEXT: lw a0, 80(sp) # 4-byte Folded Reload
1898 ; RV32IZCMP-NEXT: lw a1, 76(sp) # 4-byte Folded Reload
1899 ; RV32IZCMP-NEXT: lw a2, 72(sp) # 4-byte Folded Reload
1900 ; RV32IZCMP-NEXT: lw a3, 68(sp) # 4-byte Folded Reload
1901 ; RV32IZCMP-NEXT: lw a4, 64(sp) # 4-byte Folded Reload
1902 ; RV32IZCMP-NEXT: lw a5, 60(sp) # 4-byte Folded Reload
1903 ; RV32IZCMP-NEXT: lw a6, 56(sp) # 4-byte Folded Reload
1904 ; RV32IZCMP-NEXT: lw a7, 52(sp) # 4-byte Folded Reload
1905 ; RV32IZCMP-NEXT: lw t3, 48(sp) # 4-byte Folded Reload
1906 ; RV32IZCMP-NEXT: lw t4, 44(sp) # 4-byte Folded Reload
1907 ; RV32IZCMP-NEXT: lw t5, 40(sp) # 4-byte Folded Reload
1908 ; RV32IZCMP-NEXT: lw t6, 36(sp) # 4-byte Folded Reload
1909 ; RV32IZCMP-NEXT: addi sp, sp, 48
1910 ; RV32IZCMP-NEXT: cm.pop {ra, s0-s11}, 112
1911 ; RV32IZCMP-NEXT: mret
1913 ; RV64IZCMP-LABEL: callee_with_irq:
1914 ; RV64IZCMP: # %bb.0:
1915 ; RV64IZCMP-NEXT: cm.push {ra, s0-s11}, -160
1916 ; RV64IZCMP-NEXT: addi sp, sp, -128
1917 ; RV64IZCMP-NEXT: sd t0, 168(sp) # 8-byte Folded Spill
1918 ; RV64IZCMP-NEXT: sd t1, 160(sp) # 8-byte Folded Spill
1919 ; RV64IZCMP-NEXT: sd t2, 152(sp) # 8-byte Folded Spill
1920 ; RV64IZCMP-NEXT: sd a0, 144(sp) # 8-byte Folded Spill
1921 ; RV64IZCMP-NEXT: sd a1, 136(sp) # 8-byte Folded Spill
1922 ; RV64IZCMP-NEXT: sd a2, 128(sp) # 8-byte Folded Spill
1923 ; RV64IZCMP-NEXT: sd a3, 120(sp) # 8-byte Folded Spill
1924 ; RV64IZCMP-NEXT: sd a4, 112(sp) # 8-byte Folded Spill
1925 ; RV64IZCMP-NEXT: sd a5, 104(sp) # 8-byte Folded Spill
1926 ; RV64IZCMP-NEXT: sd a6, 96(sp) # 8-byte Folded Spill
1927 ; RV64IZCMP-NEXT: sd a7, 88(sp) # 8-byte Folded Spill
1928 ; RV64IZCMP-NEXT: sd t3, 80(sp) # 8-byte Folded Spill
1929 ; RV64IZCMP-NEXT: sd t4, 72(sp) # 8-byte Folded Spill
1930 ; RV64IZCMP-NEXT: sd t5, 64(sp) # 8-byte Folded Spill
1931 ; RV64IZCMP-NEXT: sd t6, 56(sp) # 8-byte Folded Spill
1932 ; RV64IZCMP-NEXT: lui a6, %hi(var_test_irq)
1933 ; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq)(a6)
1934 ; RV64IZCMP-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
1935 ; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(a6)
1936 ; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
1937 ; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(a6)
1938 ; RV64IZCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
1939 ; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(a6)
1940 ; RV64IZCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
1941 ; RV64IZCMP-NEXT: addi a5, a6, %lo(var_test_irq)
1942 ; RV64IZCMP-NEXT: lw a0, 16(a5)
1943 ; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
1944 ; RV64IZCMP-NEXT: lw a0, 20(a5)
1945 ; RV64IZCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
1946 ; RV64IZCMP-NEXT: lw t4, 24(a5)
1947 ; RV64IZCMP-NEXT: lw t5, 28(a5)
1948 ; RV64IZCMP-NEXT: lw t6, 32(a5)
1949 ; RV64IZCMP-NEXT: lw s2, 36(a5)
1950 ; RV64IZCMP-NEXT: lw s3, 40(a5)
1951 ; RV64IZCMP-NEXT: lw s4, 44(a5)
1952 ; RV64IZCMP-NEXT: lw s5, 48(a5)
1953 ; RV64IZCMP-NEXT: lw s6, 52(a5)
1954 ; RV64IZCMP-NEXT: lw s7, 56(a5)
1955 ; RV64IZCMP-NEXT: lw s8, 60(a5)
1956 ; RV64IZCMP-NEXT: lw s9, 64(a5)
1957 ; RV64IZCMP-NEXT: lw s10, 68(a5)
1958 ; RV64IZCMP-NEXT: lw s11, 72(a5)
1959 ; RV64IZCMP-NEXT: lw ra, 76(a5)
1960 ; RV64IZCMP-NEXT: lw s1, 80(a5)
1961 ; RV64IZCMP-NEXT: lw t3, 84(a5)
1962 ; RV64IZCMP-NEXT: lw t2, 88(a5)
1963 ; RV64IZCMP-NEXT: lw t1, 92(a5)
1964 ; RV64IZCMP-NEXT: lw t0, 96(a5)
1965 ; RV64IZCMP-NEXT: lw s0, 100(a5)
1966 ; RV64IZCMP-NEXT: lw a7, 104(a5)
1967 ; RV64IZCMP-NEXT: lw a4, 108(a5)
1968 ; RV64IZCMP-NEXT: lw a0, 124(a5)
1969 ; RV64IZCMP-NEXT: lw a1, 120(a5)
1970 ; RV64IZCMP-NEXT: lw a2, 116(a5)
1971 ; RV64IZCMP-NEXT: lw a3, 112(a5)
1972 ; RV64IZCMP-NEXT: sw a0, 124(a5)
1973 ; RV64IZCMP-NEXT: sw a1, 120(a5)
1974 ; RV64IZCMP-NEXT: sw a2, 116(a5)
1975 ; RV64IZCMP-NEXT: sw a3, 112(a5)
1976 ; RV64IZCMP-NEXT: sw a4, 108(a5)
1977 ; RV64IZCMP-NEXT: sw a7, 104(a5)
1978 ; RV64IZCMP-NEXT: sw s0, 100(a5)
1979 ; RV64IZCMP-NEXT: sw t0, 96(a5)
1980 ; RV64IZCMP-NEXT: sw t1, 92(a5)
1981 ; RV64IZCMP-NEXT: sw t2, 88(a5)
1982 ; RV64IZCMP-NEXT: sw t3, 84(a5)
1983 ; RV64IZCMP-NEXT: sw s1, 80(a5)
1984 ; RV64IZCMP-NEXT: sw ra, 76(a5)
1985 ; RV64IZCMP-NEXT: sw s11, 72(a5)
1986 ; RV64IZCMP-NEXT: sw s10, 68(a5)
1987 ; RV64IZCMP-NEXT: sw s9, 64(a5)
1988 ; RV64IZCMP-NEXT: sw s8, 60(a5)
1989 ; RV64IZCMP-NEXT: sw s7, 56(a5)
1990 ; RV64IZCMP-NEXT: sw s6, 52(a5)
1991 ; RV64IZCMP-NEXT: sw s5, 48(a5)
1992 ; RV64IZCMP-NEXT: sw s4, 44(a5)
1993 ; RV64IZCMP-NEXT: sw s3, 40(a5)
1994 ; RV64IZCMP-NEXT: sw s2, 36(a5)
1995 ; RV64IZCMP-NEXT: sw t6, 32(a5)
1996 ; RV64IZCMP-NEXT: sw t5, 28(a5)
1997 ; RV64IZCMP-NEXT: sw t4, 24(a5)
1998 ; RV64IZCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
1999 ; RV64IZCMP-NEXT: sw a0, 20(a5)
2000 ; RV64IZCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
2001 ; RV64IZCMP-NEXT: sw a0, 16(a5)
2002 ; RV64IZCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
2003 ; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(a6)
2004 ; RV64IZCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
2005 ; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(a6)
2006 ; RV64IZCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
2007 ; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(a6)
2008 ; RV64IZCMP-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
2009 ; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq)(a6)
2010 ; RV64IZCMP-NEXT: ld t0, 168(sp) # 8-byte Folded Reload
2011 ; RV64IZCMP-NEXT: ld t1, 160(sp) # 8-byte Folded Reload
2012 ; RV64IZCMP-NEXT: ld t2, 152(sp) # 8-byte Folded Reload
2013 ; RV64IZCMP-NEXT: ld a0, 144(sp) # 8-byte Folded Reload
2014 ; RV64IZCMP-NEXT: ld a1, 136(sp) # 8-byte Folded Reload
2015 ; RV64IZCMP-NEXT: ld a2, 128(sp) # 8-byte Folded Reload
2016 ; RV64IZCMP-NEXT: ld a3, 120(sp) # 8-byte Folded Reload
2017 ; RV64IZCMP-NEXT: ld a4, 112(sp) # 8-byte Folded Reload
2018 ; RV64IZCMP-NEXT: ld a5, 104(sp) # 8-byte Folded Reload
2019 ; RV64IZCMP-NEXT: ld a6, 96(sp) # 8-byte Folded Reload
2020 ; RV64IZCMP-NEXT: ld a7, 88(sp) # 8-byte Folded Reload
2021 ; RV64IZCMP-NEXT: ld t3, 80(sp) # 8-byte Folded Reload
2022 ; RV64IZCMP-NEXT: ld t4, 72(sp) # 8-byte Folded Reload
2023 ; RV64IZCMP-NEXT: ld t5, 64(sp) # 8-byte Folded Reload
2024 ; RV64IZCMP-NEXT: ld t6, 56(sp) # 8-byte Folded Reload
2025 ; RV64IZCMP-NEXT: addi sp, sp, 128
2026 ; RV64IZCMP-NEXT: cm.pop {ra, s0-s11}, 160
2027 ; RV64IZCMP-NEXT: mret
2029 ; RV32IZCMP-SR-LABEL: callee_with_irq:
2030 ; RV32IZCMP-SR: # %bb.0:
2031 ; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s11}, -112
2032 ; RV32IZCMP-SR-NEXT: addi sp, sp, -48
2033 ; RV32IZCMP-SR-NEXT: sw t0, 92(sp) # 4-byte Folded Spill
2034 ; RV32IZCMP-SR-NEXT: sw t1, 88(sp) # 4-byte Folded Spill
2035 ; RV32IZCMP-SR-NEXT: sw t2, 84(sp) # 4-byte Folded Spill
2036 ; RV32IZCMP-SR-NEXT: sw a0, 80(sp) # 4-byte Folded Spill
2037 ; RV32IZCMP-SR-NEXT: sw a1, 76(sp) # 4-byte Folded Spill
2038 ; RV32IZCMP-SR-NEXT: sw a2, 72(sp) # 4-byte Folded Spill
2039 ; RV32IZCMP-SR-NEXT: sw a3, 68(sp) # 4-byte Folded Spill
2040 ; RV32IZCMP-SR-NEXT: sw a4, 64(sp) # 4-byte Folded Spill
2041 ; RV32IZCMP-SR-NEXT: sw a5, 60(sp) # 4-byte Folded Spill
2042 ; RV32IZCMP-SR-NEXT: sw a6, 56(sp) # 4-byte Folded Spill
2043 ; RV32IZCMP-SR-NEXT: sw a7, 52(sp) # 4-byte Folded Spill
2044 ; RV32IZCMP-SR-NEXT: sw t3, 48(sp) # 4-byte Folded Spill
2045 ; RV32IZCMP-SR-NEXT: sw t4, 44(sp) # 4-byte Folded Spill
2046 ; RV32IZCMP-SR-NEXT: sw t5, 40(sp) # 4-byte Folded Spill
2047 ; RV32IZCMP-SR-NEXT: sw t6, 36(sp) # 4-byte Folded Spill
2048 ; RV32IZCMP-SR-NEXT: lui a6, %hi(var_test_irq)
2049 ; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(a6)
2050 ; RV32IZCMP-SR-NEXT: sw a0, 32(sp) # 4-byte Folded Spill
2051 ; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(a6)
2052 ; RV32IZCMP-SR-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
2053 ; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(a6)
2054 ; RV32IZCMP-SR-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
2055 ; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(a6)
2056 ; RV32IZCMP-SR-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
2057 ; RV32IZCMP-SR-NEXT: addi a5, a6, %lo(var_test_irq)
2058 ; RV32IZCMP-SR-NEXT: lw a0, 16(a5)
2059 ; RV32IZCMP-SR-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
2060 ; RV32IZCMP-SR-NEXT: lw a0, 20(a5)
2061 ; RV32IZCMP-SR-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
2062 ; RV32IZCMP-SR-NEXT: lw t4, 24(a5)
2063 ; RV32IZCMP-SR-NEXT: lw t5, 28(a5)
2064 ; RV32IZCMP-SR-NEXT: lw t6, 32(a5)
2065 ; RV32IZCMP-SR-NEXT: lw s2, 36(a5)
2066 ; RV32IZCMP-SR-NEXT: lw s3, 40(a5)
2067 ; RV32IZCMP-SR-NEXT: lw s4, 44(a5)
2068 ; RV32IZCMP-SR-NEXT: lw s5, 48(a5)
2069 ; RV32IZCMP-SR-NEXT: lw s6, 52(a5)
2070 ; RV32IZCMP-SR-NEXT: lw s7, 56(a5)
2071 ; RV32IZCMP-SR-NEXT: lw s8, 60(a5)
2072 ; RV32IZCMP-SR-NEXT: lw s9, 64(a5)
2073 ; RV32IZCMP-SR-NEXT: lw s10, 68(a5)
2074 ; RV32IZCMP-SR-NEXT: lw s11, 72(a5)
2075 ; RV32IZCMP-SR-NEXT: lw ra, 76(a5)
2076 ; RV32IZCMP-SR-NEXT: lw s1, 80(a5)
2077 ; RV32IZCMP-SR-NEXT: lw t3, 84(a5)
2078 ; RV32IZCMP-SR-NEXT: lw t2, 88(a5)
2079 ; RV32IZCMP-SR-NEXT: lw t1, 92(a5)
2080 ; RV32IZCMP-SR-NEXT: lw t0, 96(a5)
2081 ; RV32IZCMP-SR-NEXT: lw s0, 100(a5)
2082 ; RV32IZCMP-SR-NEXT: lw a7, 104(a5)
2083 ; RV32IZCMP-SR-NEXT: lw a4, 108(a5)
2084 ; RV32IZCMP-SR-NEXT: lw a0, 124(a5)
2085 ; RV32IZCMP-SR-NEXT: lw a1, 120(a5)
2086 ; RV32IZCMP-SR-NEXT: lw a2, 116(a5)
2087 ; RV32IZCMP-SR-NEXT: lw a3, 112(a5)
2088 ; RV32IZCMP-SR-NEXT: sw a0, 124(a5)
2089 ; RV32IZCMP-SR-NEXT: sw a1, 120(a5)
2090 ; RV32IZCMP-SR-NEXT: sw a2, 116(a5)
2091 ; RV32IZCMP-SR-NEXT: sw a3, 112(a5)
2092 ; RV32IZCMP-SR-NEXT: sw a4, 108(a5)
2093 ; RV32IZCMP-SR-NEXT: sw a7, 104(a5)
2094 ; RV32IZCMP-SR-NEXT: sw s0, 100(a5)
2095 ; RV32IZCMP-SR-NEXT: sw t0, 96(a5)
2096 ; RV32IZCMP-SR-NEXT: sw t1, 92(a5)
2097 ; RV32IZCMP-SR-NEXT: sw t2, 88(a5)
2098 ; RV32IZCMP-SR-NEXT: sw t3, 84(a5)
2099 ; RV32IZCMP-SR-NEXT: sw s1, 80(a5)
2100 ; RV32IZCMP-SR-NEXT: sw ra, 76(a5)
2101 ; RV32IZCMP-SR-NEXT: sw s11, 72(a5)
2102 ; RV32IZCMP-SR-NEXT: sw s10, 68(a5)
2103 ; RV32IZCMP-SR-NEXT: sw s9, 64(a5)
2104 ; RV32IZCMP-SR-NEXT: sw s8, 60(a5)
2105 ; RV32IZCMP-SR-NEXT: sw s7, 56(a5)
2106 ; RV32IZCMP-SR-NEXT: sw s6, 52(a5)
2107 ; RV32IZCMP-SR-NEXT: sw s5, 48(a5)
2108 ; RV32IZCMP-SR-NEXT: sw s4, 44(a5)
2109 ; RV32IZCMP-SR-NEXT: sw s3, 40(a5)
2110 ; RV32IZCMP-SR-NEXT: sw s2, 36(a5)
2111 ; RV32IZCMP-SR-NEXT: sw t6, 32(a5)
2112 ; RV32IZCMP-SR-NEXT: sw t5, 28(a5)
2113 ; RV32IZCMP-SR-NEXT: sw t4, 24(a5)
2114 ; RV32IZCMP-SR-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
2115 ; RV32IZCMP-SR-NEXT: sw a0, 20(a5)
2116 ; RV32IZCMP-SR-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
2117 ; RV32IZCMP-SR-NEXT: sw a0, 16(a5)
2118 ; RV32IZCMP-SR-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
2119 ; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(a6)
2120 ; RV32IZCMP-SR-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
2121 ; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(a6)
2122 ; RV32IZCMP-SR-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
2123 ; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(a6)
2124 ; RV32IZCMP-SR-NEXT: lw a0, 32(sp) # 4-byte Folded Reload
2125 ; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(a6)
2126 ; RV32IZCMP-SR-NEXT: lw t0, 92(sp) # 4-byte Folded Reload
2127 ; RV32IZCMP-SR-NEXT: lw t1, 88(sp) # 4-byte Folded Reload
2128 ; RV32IZCMP-SR-NEXT: lw t2, 84(sp) # 4-byte Folded Reload
2129 ; RV32IZCMP-SR-NEXT: lw a0, 80(sp) # 4-byte Folded Reload
2130 ; RV32IZCMP-SR-NEXT: lw a1, 76(sp) # 4-byte Folded Reload
2131 ; RV32IZCMP-SR-NEXT: lw a2, 72(sp) # 4-byte Folded Reload
2132 ; RV32IZCMP-SR-NEXT: lw a3, 68(sp) # 4-byte Folded Reload
2133 ; RV32IZCMP-SR-NEXT: lw a4, 64(sp) # 4-byte Folded Reload
2134 ; RV32IZCMP-SR-NEXT: lw a5, 60(sp) # 4-byte Folded Reload
2135 ; RV32IZCMP-SR-NEXT: lw a6, 56(sp) # 4-byte Folded Reload
2136 ; RV32IZCMP-SR-NEXT: lw a7, 52(sp) # 4-byte Folded Reload
2137 ; RV32IZCMP-SR-NEXT: lw t3, 48(sp) # 4-byte Folded Reload
2138 ; RV32IZCMP-SR-NEXT: lw t4, 44(sp) # 4-byte Folded Reload
2139 ; RV32IZCMP-SR-NEXT: lw t5, 40(sp) # 4-byte Folded Reload
2140 ; RV32IZCMP-SR-NEXT: lw t6, 36(sp) # 4-byte Folded Reload
2141 ; RV32IZCMP-SR-NEXT: addi sp, sp, 48
2142 ; RV32IZCMP-SR-NEXT: cm.pop {ra, s0-s11}, 112
2143 ; RV32IZCMP-SR-NEXT: mret
2145 ; RV64IZCMP-SR-LABEL: callee_with_irq:
2146 ; RV64IZCMP-SR: # %bb.0:
2147 ; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s11}, -160
2148 ; RV64IZCMP-SR-NEXT: addi sp, sp, -128
2149 ; RV64IZCMP-SR-NEXT: sd t0, 168(sp) # 8-byte Folded Spill
2150 ; RV64IZCMP-SR-NEXT: sd t1, 160(sp) # 8-byte Folded Spill
2151 ; RV64IZCMP-SR-NEXT: sd t2, 152(sp) # 8-byte Folded Spill
2152 ; RV64IZCMP-SR-NEXT: sd a0, 144(sp) # 8-byte Folded Spill
2153 ; RV64IZCMP-SR-NEXT: sd a1, 136(sp) # 8-byte Folded Spill
2154 ; RV64IZCMP-SR-NEXT: sd a2, 128(sp) # 8-byte Folded Spill
2155 ; RV64IZCMP-SR-NEXT: sd a3, 120(sp) # 8-byte Folded Spill
2156 ; RV64IZCMP-SR-NEXT: sd a4, 112(sp) # 8-byte Folded Spill
2157 ; RV64IZCMP-SR-NEXT: sd a5, 104(sp) # 8-byte Folded Spill
2158 ; RV64IZCMP-SR-NEXT: sd a6, 96(sp) # 8-byte Folded Spill
2159 ; RV64IZCMP-SR-NEXT: sd a7, 88(sp) # 8-byte Folded Spill
2160 ; RV64IZCMP-SR-NEXT: sd t3, 80(sp) # 8-byte Folded Spill
2161 ; RV64IZCMP-SR-NEXT: sd t4, 72(sp) # 8-byte Folded Spill
2162 ; RV64IZCMP-SR-NEXT: sd t5, 64(sp) # 8-byte Folded Spill
2163 ; RV64IZCMP-SR-NEXT: sd t6, 56(sp) # 8-byte Folded Spill
2164 ; RV64IZCMP-SR-NEXT: lui a6, %hi(var_test_irq)
2165 ; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(a6)
2166 ; RV64IZCMP-SR-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
2167 ; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(a6)
2168 ; RV64IZCMP-SR-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
2169 ; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(a6)
2170 ; RV64IZCMP-SR-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
2171 ; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(a6)
2172 ; RV64IZCMP-SR-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
2173 ; RV64IZCMP-SR-NEXT: addi a5, a6, %lo(var_test_irq)
2174 ; RV64IZCMP-SR-NEXT: lw a0, 16(a5)
2175 ; RV64IZCMP-SR-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
2176 ; RV64IZCMP-SR-NEXT: lw a0, 20(a5)
2177 ; RV64IZCMP-SR-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
2178 ; RV64IZCMP-SR-NEXT: lw t4, 24(a5)
2179 ; RV64IZCMP-SR-NEXT: lw t5, 28(a5)
2180 ; RV64IZCMP-SR-NEXT: lw t6, 32(a5)
2181 ; RV64IZCMP-SR-NEXT: lw s2, 36(a5)
2182 ; RV64IZCMP-SR-NEXT: lw s3, 40(a5)
2183 ; RV64IZCMP-SR-NEXT: lw s4, 44(a5)
2184 ; RV64IZCMP-SR-NEXT: lw s5, 48(a5)
2185 ; RV64IZCMP-SR-NEXT: lw s6, 52(a5)
2186 ; RV64IZCMP-SR-NEXT: lw s7, 56(a5)
2187 ; RV64IZCMP-SR-NEXT: lw s8, 60(a5)
2188 ; RV64IZCMP-SR-NEXT: lw s9, 64(a5)
2189 ; RV64IZCMP-SR-NEXT: lw s10, 68(a5)
2190 ; RV64IZCMP-SR-NEXT: lw s11, 72(a5)
2191 ; RV64IZCMP-SR-NEXT: lw ra, 76(a5)
2192 ; RV64IZCMP-SR-NEXT: lw s1, 80(a5)
2193 ; RV64IZCMP-SR-NEXT: lw t3, 84(a5)
2194 ; RV64IZCMP-SR-NEXT: lw t2, 88(a5)
2195 ; RV64IZCMP-SR-NEXT: lw t1, 92(a5)
2196 ; RV64IZCMP-SR-NEXT: lw t0, 96(a5)
2197 ; RV64IZCMP-SR-NEXT: lw s0, 100(a5)
2198 ; RV64IZCMP-SR-NEXT: lw a7, 104(a5)
2199 ; RV64IZCMP-SR-NEXT: lw a4, 108(a5)
2200 ; RV64IZCMP-SR-NEXT: lw a0, 124(a5)
2201 ; RV64IZCMP-SR-NEXT: lw a1, 120(a5)
2202 ; RV64IZCMP-SR-NEXT: lw a2, 116(a5)
2203 ; RV64IZCMP-SR-NEXT: lw a3, 112(a5)
2204 ; RV64IZCMP-SR-NEXT: sw a0, 124(a5)
2205 ; RV64IZCMP-SR-NEXT: sw a1, 120(a5)
2206 ; RV64IZCMP-SR-NEXT: sw a2, 116(a5)
2207 ; RV64IZCMP-SR-NEXT: sw a3, 112(a5)
2208 ; RV64IZCMP-SR-NEXT: sw a4, 108(a5)
2209 ; RV64IZCMP-SR-NEXT: sw a7, 104(a5)
2210 ; RV64IZCMP-SR-NEXT: sw s0, 100(a5)
2211 ; RV64IZCMP-SR-NEXT: sw t0, 96(a5)
2212 ; RV64IZCMP-SR-NEXT: sw t1, 92(a5)
2213 ; RV64IZCMP-SR-NEXT: sw t2, 88(a5)
2214 ; RV64IZCMP-SR-NEXT: sw t3, 84(a5)
2215 ; RV64IZCMP-SR-NEXT: sw s1, 80(a5)
2216 ; RV64IZCMP-SR-NEXT: sw ra, 76(a5)
2217 ; RV64IZCMP-SR-NEXT: sw s11, 72(a5)
2218 ; RV64IZCMP-SR-NEXT: sw s10, 68(a5)
2219 ; RV64IZCMP-SR-NEXT: sw s9, 64(a5)
2220 ; RV64IZCMP-SR-NEXT: sw s8, 60(a5)
2221 ; RV64IZCMP-SR-NEXT: sw s7, 56(a5)
2222 ; RV64IZCMP-SR-NEXT: sw s6, 52(a5)
2223 ; RV64IZCMP-SR-NEXT: sw s5, 48(a5)
2224 ; RV64IZCMP-SR-NEXT: sw s4, 44(a5)
2225 ; RV64IZCMP-SR-NEXT: sw s3, 40(a5)
2226 ; RV64IZCMP-SR-NEXT: sw s2, 36(a5)
2227 ; RV64IZCMP-SR-NEXT: sw t6, 32(a5)
2228 ; RV64IZCMP-SR-NEXT: sw t5, 28(a5)
2229 ; RV64IZCMP-SR-NEXT: sw t4, 24(a5)
2230 ; RV64IZCMP-SR-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
2231 ; RV64IZCMP-SR-NEXT: sw a0, 20(a5)
2232 ; RV64IZCMP-SR-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
2233 ; RV64IZCMP-SR-NEXT: sw a0, 16(a5)
2234 ; RV64IZCMP-SR-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
2235 ; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(a6)
2236 ; RV64IZCMP-SR-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
2237 ; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(a6)
2238 ; RV64IZCMP-SR-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
2239 ; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(a6)
2240 ; RV64IZCMP-SR-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
2241 ; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(a6)
2242 ; RV64IZCMP-SR-NEXT: ld t0, 168(sp) # 8-byte Folded Reload
2243 ; RV64IZCMP-SR-NEXT: ld t1, 160(sp) # 8-byte Folded Reload
2244 ; RV64IZCMP-SR-NEXT: ld t2, 152(sp) # 8-byte Folded Reload
2245 ; RV64IZCMP-SR-NEXT: ld a0, 144(sp) # 8-byte Folded Reload
2246 ; RV64IZCMP-SR-NEXT: ld a1, 136(sp) # 8-byte Folded Reload
2247 ; RV64IZCMP-SR-NEXT: ld a2, 128(sp) # 8-byte Folded Reload
2248 ; RV64IZCMP-SR-NEXT: ld a3, 120(sp) # 8-byte Folded Reload
2249 ; RV64IZCMP-SR-NEXT: ld a4, 112(sp) # 8-byte Folded Reload
2250 ; RV64IZCMP-SR-NEXT: ld a5, 104(sp) # 8-byte Folded Reload
2251 ; RV64IZCMP-SR-NEXT: ld a6, 96(sp) # 8-byte Folded Reload
2252 ; RV64IZCMP-SR-NEXT: ld a7, 88(sp) # 8-byte Folded Reload
2253 ; RV64IZCMP-SR-NEXT: ld t3, 80(sp) # 8-byte Folded Reload
2254 ; RV64IZCMP-SR-NEXT: ld t4, 72(sp) # 8-byte Folded Reload
2255 ; RV64IZCMP-SR-NEXT: ld t5, 64(sp) # 8-byte Folded Reload
2256 ; RV64IZCMP-SR-NEXT: ld t6, 56(sp) # 8-byte Folded Reload
2257 ; RV64IZCMP-SR-NEXT: addi sp, sp, 128
2258 ; RV64IZCMP-SR-NEXT: cm.pop {ra, s0-s11}, 160
2259 ; RV64IZCMP-SR-NEXT: mret
2261 ; RV32I-LABEL: callee_with_irq:
2262 ; RV32I: # %bb.0:
2263 ; RV32I-NEXT: addi sp, sp, -144
2264 ; RV32I-NEXT: sw ra, 140(sp) # 4-byte Folded Spill
2265 ; RV32I-NEXT: sw t0, 136(sp) # 4-byte Folded Spill
2266 ; RV32I-NEXT: sw t1, 132(sp) # 4-byte Folded Spill
2267 ; RV32I-NEXT: sw t2, 128(sp) # 4-byte Folded Spill
2268 ; RV32I-NEXT: sw s0, 124(sp) # 4-byte Folded Spill
2269 ; RV32I-NEXT: sw s1, 120(sp) # 4-byte Folded Spill
2270 ; RV32I-NEXT: sw a0, 116(sp) # 4-byte Folded Spill
2271 ; RV32I-NEXT: sw a1, 112(sp) # 4-byte Folded Spill
2272 ; RV32I-NEXT: sw a2, 108(sp) # 4-byte Folded Spill
2273 ; RV32I-NEXT: sw a3, 104(sp) # 4-byte Folded Spill
2274 ; RV32I-NEXT: sw a4, 100(sp) # 4-byte Folded Spill
2275 ; RV32I-NEXT: sw a5, 96(sp) # 4-byte Folded Spill
2276 ; RV32I-NEXT: sw a6, 92(sp) # 4-byte Folded Spill
2277 ; RV32I-NEXT: sw a7, 88(sp) # 4-byte Folded Spill
2278 ; RV32I-NEXT: sw s2, 84(sp) # 4-byte Folded Spill
2279 ; RV32I-NEXT: sw s3, 80(sp) # 4-byte Folded Spill
2280 ; RV32I-NEXT: sw s4, 76(sp) # 4-byte Folded Spill
2281 ; RV32I-NEXT: sw s5, 72(sp) # 4-byte Folded Spill
2282 ; RV32I-NEXT: sw s6, 68(sp) # 4-byte Folded Spill
2283 ; RV32I-NEXT: sw s7, 64(sp) # 4-byte Folded Spill
2284 ; RV32I-NEXT: sw s8, 60(sp) # 4-byte Folded Spill
2285 ; RV32I-NEXT: sw s9, 56(sp) # 4-byte Folded Spill
2286 ; RV32I-NEXT: sw s10, 52(sp) # 4-byte Folded Spill
2287 ; RV32I-NEXT: sw s11, 48(sp) # 4-byte Folded Spill
2288 ; RV32I-NEXT: sw t3, 44(sp) # 4-byte Folded Spill
2289 ; RV32I-NEXT: sw t4, 40(sp) # 4-byte Folded Spill
2290 ; RV32I-NEXT: sw t5, 36(sp) # 4-byte Folded Spill
2291 ; RV32I-NEXT: sw t6, 32(sp) # 4-byte Folded Spill
2292 ; RV32I-NEXT: lui a6, %hi(var_test_irq)
2293 ; RV32I-NEXT: lw a0, %lo(var_test_irq)(a6)
2294 ; RV32I-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
2295 ; RV32I-NEXT: lw a0, %lo(var_test_irq+4)(a6)
2296 ; RV32I-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
2297 ; RV32I-NEXT: lw a0, %lo(var_test_irq+8)(a6)
2298 ; RV32I-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
2299 ; RV32I-NEXT: lw a0, %lo(var_test_irq+12)(a6)
2300 ; RV32I-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
2301 ; RV32I-NEXT: addi a5, a6, %lo(var_test_irq)
2302 ; RV32I-NEXT: lw a0, 16(a5)
2303 ; RV32I-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
2304 ; RV32I-NEXT: lw a0, 20(a5)
2305 ; RV32I-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
2306 ; RV32I-NEXT: lw t0, 24(a5)
2307 ; RV32I-NEXT: lw t1, 28(a5)
2308 ; RV32I-NEXT: lw t2, 32(a5)
2309 ; RV32I-NEXT: lw t3, 36(a5)
2310 ; RV32I-NEXT: lw t4, 40(a5)
2311 ; RV32I-NEXT: lw t5, 44(a5)
2312 ; RV32I-NEXT: lw t6, 48(a5)
2313 ; RV32I-NEXT: lw s0, 52(a5)
2314 ; RV32I-NEXT: lw s1, 56(a5)
2315 ; RV32I-NEXT: lw s2, 60(a5)
2316 ; RV32I-NEXT: lw s3, 64(a5)
2317 ; RV32I-NEXT: lw s4, 68(a5)
2318 ; RV32I-NEXT: lw s5, 72(a5)
2319 ; RV32I-NEXT: lw s6, 76(a5)
2320 ; RV32I-NEXT: lw s7, 80(a5)
2321 ; RV32I-NEXT: lw s8, 84(a5)
2322 ; RV32I-NEXT: lw s9, 88(a5)
2323 ; RV32I-NEXT: lw s10, 92(a5)
2324 ; RV32I-NEXT: lw s11, 96(a5)
2325 ; RV32I-NEXT: lw ra, 100(a5)
2326 ; RV32I-NEXT: lw a7, 104(a5)
2327 ; RV32I-NEXT: lw a4, 108(a5)
2328 ; RV32I-NEXT: lw a0, 124(a5)
2329 ; RV32I-NEXT: lw a1, 120(a5)
2330 ; RV32I-NEXT: lw a2, 116(a5)
2331 ; RV32I-NEXT: lw a3, 112(a5)
2332 ; RV32I-NEXT: sw a0, 124(a5)
2333 ; RV32I-NEXT: sw a1, 120(a5)
2334 ; RV32I-NEXT: sw a2, 116(a5)
2335 ; RV32I-NEXT: sw a3, 112(a5)
2336 ; RV32I-NEXT: sw a4, 108(a5)
2337 ; RV32I-NEXT: sw a7, 104(a5)
2338 ; RV32I-NEXT: sw ra, 100(a5)
2339 ; RV32I-NEXT: sw s11, 96(a5)
2340 ; RV32I-NEXT: sw s10, 92(a5)
2341 ; RV32I-NEXT: sw s9, 88(a5)
2342 ; RV32I-NEXT: sw s8, 84(a5)
2343 ; RV32I-NEXT: sw s7, 80(a5)
2344 ; RV32I-NEXT: sw s6, 76(a5)
2345 ; RV32I-NEXT: sw s5, 72(a5)
2346 ; RV32I-NEXT: sw s4, 68(a5)
2347 ; RV32I-NEXT: sw s3, 64(a5)
2348 ; RV32I-NEXT: sw s2, 60(a5)
2349 ; RV32I-NEXT: sw s1, 56(a5)
2350 ; RV32I-NEXT: sw s0, 52(a5)
2351 ; RV32I-NEXT: sw t6, 48(a5)
2352 ; RV32I-NEXT: sw t5, 44(a5)
2353 ; RV32I-NEXT: sw t4, 40(a5)
2354 ; RV32I-NEXT: sw t3, 36(a5)
2355 ; RV32I-NEXT: sw t2, 32(a5)
2356 ; RV32I-NEXT: sw t1, 28(a5)
2357 ; RV32I-NEXT: sw t0, 24(a5)
2358 ; RV32I-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
2359 ; RV32I-NEXT: sw a0, 20(a5)
2360 ; RV32I-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
2361 ; RV32I-NEXT: sw a0, 16(a5)
2362 ; RV32I-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
2363 ; RV32I-NEXT: sw a0, %lo(var_test_irq+12)(a6)
2364 ; RV32I-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
2365 ; RV32I-NEXT: sw a0, %lo(var_test_irq+8)(a6)
2366 ; RV32I-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
2367 ; RV32I-NEXT: sw a0, %lo(var_test_irq+4)(a6)
2368 ; RV32I-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
2369 ; RV32I-NEXT: sw a0, %lo(var_test_irq)(a6)
2370 ; RV32I-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
2371 ; RV32I-NEXT: lw t0, 136(sp) # 4-byte Folded Reload
2372 ; RV32I-NEXT: lw t1, 132(sp) # 4-byte Folded Reload
2373 ; RV32I-NEXT: lw t2, 128(sp) # 4-byte Folded Reload
2374 ; RV32I-NEXT: lw s0, 124(sp) # 4-byte Folded Reload
2375 ; RV32I-NEXT: lw s1, 120(sp) # 4-byte Folded Reload
2376 ; RV32I-NEXT: lw a0, 116(sp) # 4-byte Folded Reload
2377 ; RV32I-NEXT: lw a1, 112(sp) # 4-byte Folded Reload
2378 ; RV32I-NEXT: lw a2, 108(sp) # 4-byte Folded Reload
2379 ; RV32I-NEXT: lw a3, 104(sp) # 4-byte Folded Reload
2380 ; RV32I-NEXT: lw a4, 100(sp) # 4-byte Folded Reload
2381 ; RV32I-NEXT: lw a5, 96(sp) # 4-byte Folded Reload
2382 ; RV32I-NEXT: lw a6, 92(sp) # 4-byte Folded Reload
2383 ; RV32I-NEXT: lw a7, 88(sp) # 4-byte Folded Reload
2384 ; RV32I-NEXT: lw s2, 84(sp) # 4-byte Folded Reload
2385 ; RV32I-NEXT: lw s3, 80(sp) # 4-byte Folded Reload
2386 ; RV32I-NEXT: lw s4, 76(sp) # 4-byte Folded Reload
2387 ; RV32I-NEXT: lw s5, 72(sp) # 4-byte Folded Reload
2388 ; RV32I-NEXT: lw s6, 68(sp) # 4-byte Folded Reload
2389 ; RV32I-NEXT: lw s7, 64(sp) # 4-byte Folded Reload
2390 ; RV32I-NEXT: lw s8, 60(sp) # 4-byte Folded Reload
2391 ; RV32I-NEXT: lw s9, 56(sp) # 4-byte Folded Reload
2392 ; RV32I-NEXT: lw s10, 52(sp) # 4-byte Folded Reload
2393 ; RV32I-NEXT: lw s11, 48(sp) # 4-byte Folded Reload
2394 ; RV32I-NEXT: lw t3, 44(sp) # 4-byte Folded Reload
2395 ; RV32I-NEXT: lw t4, 40(sp) # 4-byte Folded Reload
2396 ; RV32I-NEXT: lw t5, 36(sp) # 4-byte Folded Reload
2397 ; RV32I-NEXT: lw t6, 32(sp) # 4-byte Folded Reload
2398 ; RV32I-NEXT: addi sp, sp, 144
2399 ; RV32I-NEXT: mret
2401 ; RV64I-LABEL: callee_with_irq:
2402 ; RV64I: # %bb.0:
2403 ; RV64I-NEXT: addi sp, sp, -272
2404 ; RV64I-NEXT: sd ra, 264(sp) # 8-byte Folded Spill
2405 ; RV64I-NEXT: sd t0, 256(sp) # 8-byte Folded Spill
2406 ; RV64I-NEXT: sd t1, 248(sp) # 8-byte Folded Spill
2407 ; RV64I-NEXT: sd t2, 240(sp) # 8-byte Folded Spill
2408 ; RV64I-NEXT: sd s0, 232(sp) # 8-byte Folded Spill
2409 ; RV64I-NEXT: sd s1, 224(sp) # 8-byte Folded Spill
2410 ; RV64I-NEXT: sd a0, 216(sp) # 8-byte Folded Spill
2411 ; RV64I-NEXT: sd a1, 208(sp) # 8-byte Folded Spill
2412 ; RV64I-NEXT: sd a2, 200(sp) # 8-byte Folded Spill
2413 ; RV64I-NEXT: sd a3, 192(sp) # 8-byte Folded Spill
2414 ; RV64I-NEXT: sd a4, 184(sp) # 8-byte Folded Spill
2415 ; RV64I-NEXT: sd a5, 176(sp) # 8-byte Folded Spill
2416 ; RV64I-NEXT: sd a6, 168(sp) # 8-byte Folded Spill
2417 ; RV64I-NEXT: sd a7, 160(sp) # 8-byte Folded Spill
2418 ; RV64I-NEXT: sd s2, 152(sp) # 8-byte Folded Spill
2419 ; RV64I-NEXT: sd s3, 144(sp) # 8-byte Folded Spill
2420 ; RV64I-NEXT: sd s4, 136(sp) # 8-byte Folded Spill
2421 ; RV64I-NEXT: sd s5, 128(sp) # 8-byte Folded Spill
2422 ; RV64I-NEXT: sd s6, 120(sp) # 8-byte Folded Spill
2423 ; RV64I-NEXT: sd s7, 112(sp) # 8-byte Folded Spill
2424 ; RV64I-NEXT: sd s8, 104(sp) # 8-byte Folded Spill
2425 ; RV64I-NEXT: sd s9, 96(sp) # 8-byte Folded Spill
2426 ; RV64I-NEXT: sd s10, 88(sp) # 8-byte Folded Spill
2427 ; RV64I-NEXT: sd s11, 80(sp) # 8-byte Folded Spill
2428 ; RV64I-NEXT: sd t3, 72(sp) # 8-byte Folded Spill
2429 ; RV64I-NEXT: sd t4, 64(sp) # 8-byte Folded Spill
2430 ; RV64I-NEXT: sd t5, 56(sp) # 8-byte Folded Spill
2431 ; RV64I-NEXT: sd t6, 48(sp) # 8-byte Folded Spill
2432 ; RV64I-NEXT: lui a6, %hi(var_test_irq)
2433 ; RV64I-NEXT: lw a0, %lo(var_test_irq)(a6)
2434 ; RV64I-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
2435 ; RV64I-NEXT: lw a0, %lo(var_test_irq+4)(a6)
2436 ; RV64I-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
2437 ; RV64I-NEXT: lw a0, %lo(var_test_irq+8)(a6)
2438 ; RV64I-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
2439 ; RV64I-NEXT: lw a0, %lo(var_test_irq+12)(a6)
2440 ; RV64I-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
2441 ; RV64I-NEXT: addi a5, a6, %lo(var_test_irq)
2442 ; RV64I-NEXT: lw a0, 16(a5)
2443 ; RV64I-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
2444 ; RV64I-NEXT: lw a0, 20(a5)
2445 ; RV64I-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
2446 ; RV64I-NEXT: lw t0, 24(a5)
2447 ; RV64I-NEXT: lw t1, 28(a5)
2448 ; RV64I-NEXT: lw t2, 32(a5)
2449 ; RV64I-NEXT: lw t3, 36(a5)
2450 ; RV64I-NEXT: lw t4, 40(a5)
2451 ; RV64I-NEXT: lw t5, 44(a5)
2452 ; RV64I-NEXT: lw t6, 48(a5)
2453 ; RV64I-NEXT: lw s0, 52(a5)
2454 ; RV64I-NEXT: lw s1, 56(a5)
2455 ; RV64I-NEXT: lw s2, 60(a5)
2456 ; RV64I-NEXT: lw s3, 64(a5)
2457 ; RV64I-NEXT: lw s4, 68(a5)
2458 ; RV64I-NEXT: lw s5, 72(a5)
2459 ; RV64I-NEXT: lw s6, 76(a5)
2460 ; RV64I-NEXT: lw s7, 80(a5)
2461 ; RV64I-NEXT: lw s8, 84(a5)
2462 ; RV64I-NEXT: lw s9, 88(a5)
2463 ; RV64I-NEXT: lw s10, 92(a5)
2464 ; RV64I-NEXT: lw s11, 96(a5)
2465 ; RV64I-NEXT: lw ra, 100(a5)
2466 ; RV64I-NEXT: lw a7, 104(a5)
2467 ; RV64I-NEXT: lw a4, 108(a5)
2468 ; RV64I-NEXT: lw a0, 124(a5)
2469 ; RV64I-NEXT: lw a1, 120(a5)
2470 ; RV64I-NEXT: lw a2, 116(a5)
2471 ; RV64I-NEXT: lw a3, 112(a5)
2472 ; RV64I-NEXT: sw a0, 124(a5)
2473 ; RV64I-NEXT: sw a1, 120(a5)
2474 ; RV64I-NEXT: sw a2, 116(a5)
2475 ; RV64I-NEXT: sw a3, 112(a5)
2476 ; RV64I-NEXT: sw a4, 108(a5)
2477 ; RV64I-NEXT: sw a7, 104(a5)
2478 ; RV64I-NEXT: sw ra, 100(a5)
2479 ; RV64I-NEXT: sw s11, 96(a5)
2480 ; RV64I-NEXT: sw s10, 92(a5)
2481 ; RV64I-NEXT: sw s9, 88(a5)
2482 ; RV64I-NEXT: sw s8, 84(a5)
2483 ; RV64I-NEXT: sw s7, 80(a5)
2484 ; RV64I-NEXT: sw s6, 76(a5)
2485 ; RV64I-NEXT: sw s5, 72(a5)
2486 ; RV64I-NEXT: sw s4, 68(a5)
2487 ; RV64I-NEXT: sw s3, 64(a5)
2488 ; RV64I-NEXT: sw s2, 60(a5)
2489 ; RV64I-NEXT: sw s1, 56(a5)
2490 ; RV64I-NEXT: sw s0, 52(a5)
2491 ; RV64I-NEXT: sw t6, 48(a5)
2492 ; RV64I-NEXT: sw t5, 44(a5)
2493 ; RV64I-NEXT: sw t4, 40(a5)
2494 ; RV64I-NEXT: sw t3, 36(a5)
2495 ; RV64I-NEXT: sw t2, 32(a5)
2496 ; RV64I-NEXT: sw t1, 28(a5)
2497 ; RV64I-NEXT: sw t0, 24(a5)
2498 ; RV64I-NEXT: ld a0, 0(sp) # 8-byte Folded Reload
2499 ; RV64I-NEXT: sw a0, 20(a5)
2500 ; RV64I-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
2501 ; RV64I-NEXT: sw a0, 16(a5)
2502 ; RV64I-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
2503 ; RV64I-NEXT: sw a0, %lo(var_test_irq+12)(a6)
2504 ; RV64I-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
2505 ; RV64I-NEXT: sw a0, %lo(var_test_irq+8)(a6)
2506 ; RV64I-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
2507 ; RV64I-NEXT: sw a0, %lo(var_test_irq+4)(a6)
2508 ; RV64I-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
2509 ; RV64I-NEXT: sw a0, %lo(var_test_irq)(a6)
2510 ; RV64I-NEXT: ld ra, 264(sp) # 8-byte Folded Reload
2511 ; RV64I-NEXT: ld t0, 256(sp) # 8-byte Folded Reload
2512 ; RV64I-NEXT: ld t1, 248(sp) # 8-byte Folded Reload
2513 ; RV64I-NEXT: ld t2, 240(sp) # 8-byte Folded Reload
2514 ; RV64I-NEXT: ld s0, 232(sp) # 8-byte Folded Reload
2515 ; RV64I-NEXT: ld s1, 224(sp) # 8-byte Folded Reload
2516 ; RV64I-NEXT: ld a0, 216(sp) # 8-byte Folded Reload
2517 ; RV64I-NEXT: ld a1, 208(sp) # 8-byte Folded Reload
2518 ; RV64I-NEXT: ld a2, 200(sp) # 8-byte Folded Reload
2519 ; RV64I-NEXT: ld a3, 192(sp) # 8-byte Folded Reload
2520 ; RV64I-NEXT: ld a4, 184(sp) # 8-byte Folded Reload
2521 ; RV64I-NEXT: ld a5, 176(sp) # 8-byte Folded Reload
2522 ; RV64I-NEXT: ld a6, 168(sp) # 8-byte Folded Reload
2523 ; RV64I-NEXT: ld a7, 160(sp) # 8-byte Folded Reload
2524 ; RV64I-NEXT: ld s2, 152(sp) # 8-byte Folded Reload
2525 ; RV64I-NEXT: ld s3, 144(sp) # 8-byte Folded Reload
2526 ; RV64I-NEXT: ld s4, 136(sp) # 8-byte Folded Reload
2527 ; RV64I-NEXT: ld s5, 128(sp) # 8-byte Folded Reload
2528 ; RV64I-NEXT: ld s6, 120(sp) # 8-byte Folded Reload
2529 ; RV64I-NEXT: ld s7, 112(sp) # 8-byte Folded Reload
2530 ; RV64I-NEXT: ld s8, 104(sp) # 8-byte Folded Reload
2531 ; RV64I-NEXT: ld s9, 96(sp) # 8-byte Folded Reload
2532 ; RV64I-NEXT: ld s10, 88(sp) # 8-byte Folded Reload
2533 ; RV64I-NEXT: ld s11, 80(sp) # 8-byte Folded Reload
2534 ; RV64I-NEXT: ld t3, 72(sp) # 8-byte Folded Reload
2535 ; RV64I-NEXT: ld t4, 64(sp) # 8-byte Folded Reload
2536 ; RV64I-NEXT: ld t5, 56(sp) # 8-byte Folded Reload
2537 ; RV64I-NEXT: ld t6, 48(sp) # 8-byte Folded Reload
2538 ; RV64I-NEXT: addi sp, sp, 272
2539 ; RV64I-NEXT: mret
2540 %val = load [32 x i32], [32 x i32]* @var_test_irq
2541 store volatile [32 x i32] %val, [32 x i32]* @var_test_irq
2542 ret void
2543 }
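; The same register pressure without the interrupt attribute only needs the
; callee-saved registers, so cm.push/cm.popret cover the whole frame and no
; extra sp adjustment is required.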
2545 define void @callee_no_irq() nounwind{
2546 ; RV32IZCMP-LABEL: callee_no_irq:
2547 ; RV32IZCMP: # %bb.0:
2548 ; RV32IZCMP-NEXT: cm.push {ra, s0-s11}, -96
2549 ; RV32IZCMP-NEXT: lui a6, %hi(var_test_irq)
2550 ; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq)(a6)
2551 ; RV32IZCMP-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
2552 ; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(a6)
2553 ; RV32IZCMP-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
2554 ; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(a6)
2555 ; RV32IZCMP-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
2556 ; RV32IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(a6)
2557 ; RV32IZCMP-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
2558 ; RV32IZCMP-NEXT: addi a5, a6, %lo(var_test_irq)
2559 ; RV32IZCMP-NEXT: lw a0, 16(a5)
2560 ; RV32IZCMP-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
2561 ; RV32IZCMP-NEXT: lw a0, 20(a5)
2562 ; RV32IZCMP-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
2563 ; RV32IZCMP-NEXT: lw t4, 24(a5)
2564 ; RV32IZCMP-NEXT: lw t5, 28(a5)
2565 ; RV32IZCMP-NEXT: lw t6, 32(a5)
2566 ; RV32IZCMP-NEXT: lw s2, 36(a5)
2567 ; RV32IZCMP-NEXT: lw s3, 40(a5)
2568 ; RV32IZCMP-NEXT: lw s4, 44(a5)
2569 ; RV32IZCMP-NEXT: lw s5, 48(a5)
2570 ; RV32IZCMP-NEXT: lw s6, 52(a5)
2571 ; RV32IZCMP-NEXT: lw s7, 56(a5)
2572 ; RV32IZCMP-NEXT: lw s8, 60(a5)
2573 ; RV32IZCMP-NEXT: lw s9, 64(a5)
2574 ; RV32IZCMP-NEXT: lw s10, 68(a5)
2575 ; RV32IZCMP-NEXT: lw s11, 72(a5)
2576 ; RV32IZCMP-NEXT: lw ra, 76(a5)
2577 ; RV32IZCMP-NEXT: lw s1, 80(a5)
2578 ; RV32IZCMP-NEXT: lw t3, 84(a5)
2579 ; RV32IZCMP-NEXT: lw t2, 88(a5)
2580 ; RV32IZCMP-NEXT: lw t1, 92(a5)
2581 ; RV32IZCMP-NEXT: lw t0, 96(a5)
2582 ; RV32IZCMP-NEXT: lw s0, 100(a5)
2583 ; RV32IZCMP-NEXT: lw a7, 104(a5)
2584 ; RV32IZCMP-NEXT: lw a4, 108(a5)
2585 ; RV32IZCMP-NEXT: lw a0, 124(a5)
2586 ; RV32IZCMP-NEXT: lw a1, 120(a5)
2587 ; RV32IZCMP-NEXT: lw a2, 116(a5)
2588 ; RV32IZCMP-NEXT: lw a3, 112(a5)
2589 ; RV32IZCMP-NEXT: sw a0, 124(a5)
2590 ; RV32IZCMP-NEXT: sw a1, 120(a5)
2591 ; RV32IZCMP-NEXT: sw a2, 116(a5)
2592 ; RV32IZCMP-NEXT: sw a3, 112(a5)
2593 ; RV32IZCMP-NEXT: sw a4, 108(a5)
2594 ; RV32IZCMP-NEXT: sw a7, 104(a5)
2595 ; RV32IZCMP-NEXT: sw s0, 100(a5)
2596 ; RV32IZCMP-NEXT: sw t0, 96(a5)
2597 ; RV32IZCMP-NEXT: sw t1, 92(a5)
2598 ; RV32IZCMP-NEXT: sw t2, 88(a5)
2599 ; RV32IZCMP-NEXT: sw t3, 84(a5)
2600 ; RV32IZCMP-NEXT: sw s1, 80(a5)
2601 ; RV32IZCMP-NEXT: sw ra, 76(a5)
2602 ; RV32IZCMP-NEXT: sw s11, 72(a5)
2603 ; RV32IZCMP-NEXT: sw s10, 68(a5)
2604 ; RV32IZCMP-NEXT: sw s9, 64(a5)
2605 ; RV32IZCMP-NEXT: sw s8, 60(a5)
2606 ; RV32IZCMP-NEXT: sw s7, 56(a5)
2607 ; RV32IZCMP-NEXT: sw s6, 52(a5)
2608 ; RV32IZCMP-NEXT: sw s5, 48(a5)
2609 ; RV32IZCMP-NEXT: sw s4, 44(a5)
2610 ; RV32IZCMP-NEXT: sw s3, 40(a5)
2611 ; RV32IZCMP-NEXT: sw s2, 36(a5)
2612 ; RV32IZCMP-NEXT: sw t6, 32(a5)
2613 ; RV32IZCMP-NEXT: sw t5, 28(a5)
2614 ; RV32IZCMP-NEXT: sw t4, 24(a5)
2615 ; RV32IZCMP-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
2616 ; RV32IZCMP-NEXT: sw a0, 20(a5)
2617 ; RV32IZCMP-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
2618 ; RV32IZCMP-NEXT: sw a0, 16(a5)
2619 ; RV32IZCMP-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
2620 ; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(a6)
2621 ; RV32IZCMP-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
2622 ; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(a6)
2623 ; RV32IZCMP-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
2624 ; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(a6)
2625 ; RV32IZCMP-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
2626 ; RV32IZCMP-NEXT: sw a0, %lo(var_test_irq)(a6)
2627 ; RV32IZCMP-NEXT: cm.popret {ra, s0-s11}, 96
2629 ; RV64IZCMP-LABEL: callee_no_irq:
2630 ; RV64IZCMP: # %bb.0:
2631 ; RV64IZCMP-NEXT: cm.push {ra, s0-s11}, -160
2632 ; RV64IZCMP-NEXT: lui a6, %hi(var_test_irq)
2633 ; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq)(a6)
2634 ; RV64IZCMP-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
2635 ; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+4)(a6)
2636 ; RV64IZCMP-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
2637 ; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+8)(a6)
2638 ; RV64IZCMP-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
2639 ; RV64IZCMP-NEXT: lw a0, %lo(var_test_irq+12)(a6)
2640 ; RV64IZCMP-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
2641 ; RV64IZCMP-NEXT: addi a5, a6, %lo(var_test_irq)
2642 ; RV64IZCMP-NEXT: lw a0, 16(a5)
2643 ; RV64IZCMP-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
2644 ; RV64IZCMP-NEXT: lw a0, 20(a5)
2645 ; RV64IZCMP-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
2646 ; RV64IZCMP-NEXT: lw t4, 24(a5)
2647 ; RV64IZCMP-NEXT: lw t5, 28(a5)
2648 ; RV64IZCMP-NEXT: lw t6, 32(a5)
2649 ; RV64IZCMP-NEXT: lw s2, 36(a5)
2650 ; RV64IZCMP-NEXT: lw s3, 40(a5)
2651 ; RV64IZCMP-NEXT: lw s4, 44(a5)
2652 ; RV64IZCMP-NEXT: lw s5, 48(a5)
2653 ; RV64IZCMP-NEXT: lw s6, 52(a5)
2654 ; RV64IZCMP-NEXT: lw s7, 56(a5)
2655 ; RV64IZCMP-NEXT: lw s8, 60(a5)
2656 ; RV64IZCMP-NEXT: lw s9, 64(a5)
2657 ; RV64IZCMP-NEXT: lw s10, 68(a5)
2658 ; RV64IZCMP-NEXT: lw s11, 72(a5)
2659 ; RV64IZCMP-NEXT: lw ra, 76(a5)
2660 ; RV64IZCMP-NEXT: lw s1, 80(a5)
2661 ; RV64IZCMP-NEXT: lw t3, 84(a5)
2662 ; RV64IZCMP-NEXT: lw t2, 88(a5)
2663 ; RV64IZCMP-NEXT: lw t1, 92(a5)
2664 ; RV64IZCMP-NEXT: lw t0, 96(a5)
2665 ; RV64IZCMP-NEXT: lw s0, 100(a5)
2666 ; RV64IZCMP-NEXT: lw a7, 104(a5)
2667 ; RV64IZCMP-NEXT: lw a4, 108(a5)
2668 ; RV64IZCMP-NEXT: lw a0, 124(a5)
2669 ; RV64IZCMP-NEXT: lw a1, 120(a5)
2670 ; RV64IZCMP-NEXT: lw a2, 116(a5)
2671 ; RV64IZCMP-NEXT: lw a3, 112(a5)
2672 ; RV64IZCMP-NEXT: sw a0, 124(a5)
2673 ; RV64IZCMP-NEXT: sw a1, 120(a5)
2674 ; RV64IZCMP-NEXT: sw a2, 116(a5)
2675 ; RV64IZCMP-NEXT: sw a3, 112(a5)
2676 ; RV64IZCMP-NEXT: sw a4, 108(a5)
2677 ; RV64IZCMP-NEXT: sw a7, 104(a5)
2678 ; RV64IZCMP-NEXT: sw s0, 100(a5)
2679 ; RV64IZCMP-NEXT: sw t0, 96(a5)
2680 ; RV64IZCMP-NEXT: sw t1, 92(a5)
2681 ; RV64IZCMP-NEXT: sw t2, 88(a5)
2682 ; RV64IZCMP-NEXT: sw t3, 84(a5)
2683 ; RV64IZCMP-NEXT: sw s1, 80(a5)
2684 ; RV64IZCMP-NEXT: sw ra, 76(a5)
2685 ; RV64IZCMP-NEXT: sw s11, 72(a5)
2686 ; RV64IZCMP-NEXT: sw s10, 68(a5)
2687 ; RV64IZCMP-NEXT: sw s9, 64(a5)
2688 ; RV64IZCMP-NEXT: sw s8, 60(a5)
2689 ; RV64IZCMP-NEXT: sw s7, 56(a5)
2690 ; RV64IZCMP-NEXT: sw s6, 52(a5)
2691 ; RV64IZCMP-NEXT: sw s5, 48(a5)
2692 ; RV64IZCMP-NEXT: sw s4, 44(a5)
2693 ; RV64IZCMP-NEXT: sw s3, 40(a5)
2694 ; RV64IZCMP-NEXT: sw s2, 36(a5)
2695 ; RV64IZCMP-NEXT: sw t6, 32(a5)
2696 ; RV64IZCMP-NEXT: sw t5, 28(a5)
2697 ; RV64IZCMP-NEXT: sw t4, 24(a5)
2698 ; RV64IZCMP-NEXT: ld a0, 0(sp) # 8-byte Folded Reload
2699 ; RV64IZCMP-NEXT: sw a0, 20(a5)
2700 ; RV64IZCMP-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
2701 ; RV64IZCMP-NEXT: sw a0, 16(a5)
2702 ; RV64IZCMP-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
2703 ; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+12)(a6)
2704 ; RV64IZCMP-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
2705 ; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+8)(a6)
2706 ; RV64IZCMP-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
2707 ; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq+4)(a6)
2708 ; RV64IZCMP-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
2709 ; RV64IZCMP-NEXT: sw a0, %lo(var_test_irq)(a6)
2710 ; RV64IZCMP-NEXT: cm.popret {ra, s0-s11}, 160
; RV32IZCMP-SR-LABEL: callee_no_irq:
; RV32IZCMP-SR: # %bb.0:
; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s11}, -96
; RV32IZCMP-SR-NEXT: lui a6, %hi(var_test_irq)
; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(a6)
; RV32IZCMP-SR-NEXT: sw a0, 28(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(a6)
; RV32IZCMP-SR-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(a6)
; RV32IZCMP-SR-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(a6)
; RV32IZCMP-SR-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: addi a5, a6, %lo(var_test_irq)
; RV32IZCMP-SR-NEXT: lw a0, 16(a5)
; RV32IZCMP-SR-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: lw a0, 20(a5)
; RV32IZCMP-SR-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
; RV32IZCMP-SR-NEXT: lw t4, 24(a5)
; RV32IZCMP-SR-NEXT: lw t5, 28(a5)
; RV32IZCMP-SR-NEXT: lw t6, 32(a5)
; RV32IZCMP-SR-NEXT: lw s2, 36(a5)
; RV32IZCMP-SR-NEXT: lw s3, 40(a5)
; RV32IZCMP-SR-NEXT: lw s4, 44(a5)
; RV32IZCMP-SR-NEXT: lw s5, 48(a5)
; RV32IZCMP-SR-NEXT: lw s6, 52(a5)
; RV32IZCMP-SR-NEXT: lw s7, 56(a5)
; RV32IZCMP-SR-NEXT: lw s8, 60(a5)
; RV32IZCMP-SR-NEXT: lw s9, 64(a5)
; RV32IZCMP-SR-NEXT: lw s10, 68(a5)
; RV32IZCMP-SR-NEXT: lw s11, 72(a5)
; RV32IZCMP-SR-NEXT: lw ra, 76(a5)
; RV32IZCMP-SR-NEXT: lw s1, 80(a5)
; RV32IZCMP-SR-NEXT: lw t3, 84(a5)
; RV32IZCMP-SR-NEXT: lw t2, 88(a5)
; RV32IZCMP-SR-NEXT: lw t1, 92(a5)
; RV32IZCMP-SR-NEXT: lw t0, 96(a5)
; RV32IZCMP-SR-NEXT: lw s0, 100(a5)
; RV32IZCMP-SR-NEXT: lw a7, 104(a5)
; RV32IZCMP-SR-NEXT: lw a4, 108(a5)
; RV32IZCMP-SR-NEXT: lw a0, 124(a5)
; RV32IZCMP-SR-NEXT: lw a1, 120(a5)
; RV32IZCMP-SR-NEXT: lw a2, 116(a5)
; RV32IZCMP-SR-NEXT: lw a3, 112(a5)
; RV32IZCMP-SR-NEXT: sw a0, 124(a5)
; RV32IZCMP-SR-NEXT: sw a1, 120(a5)
; RV32IZCMP-SR-NEXT: sw a2, 116(a5)
; RV32IZCMP-SR-NEXT: sw a3, 112(a5)
; RV32IZCMP-SR-NEXT: sw a4, 108(a5)
; RV32IZCMP-SR-NEXT: sw a7, 104(a5)
; RV32IZCMP-SR-NEXT: sw s0, 100(a5)
; RV32IZCMP-SR-NEXT: sw t0, 96(a5)
; RV32IZCMP-SR-NEXT: sw t1, 92(a5)
; RV32IZCMP-SR-NEXT: sw t2, 88(a5)
; RV32IZCMP-SR-NEXT: sw t3, 84(a5)
; RV32IZCMP-SR-NEXT: sw s1, 80(a5)
; RV32IZCMP-SR-NEXT: sw ra, 76(a5)
; RV32IZCMP-SR-NEXT: sw s11, 72(a5)
; RV32IZCMP-SR-NEXT: sw s10, 68(a5)
; RV32IZCMP-SR-NEXT: sw s9, 64(a5)
; RV32IZCMP-SR-NEXT: sw s8, 60(a5)
; RV32IZCMP-SR-NEXT: sw s7, 56(a5)
; RV32IZCMP-SR-NEXT: sw s6, 52(a5)
; RV32IZCMP-SR-NEXT: sw s5, 48(a5)
; RV32IZCMP-SR-NEXT: sw s4, 44(a5)
; RV32IZCMP-SR-NEXT: sw s3, 40(a5)
; RV32IZCMP-SR-NEXT: sw s2, 36(a5)
; RV32IZCMP-SR-NEXT: sw t6, 32(a5)
; RV32IZCMP-SR-NEXT: sw t5, 28(a5)
; RV32IZCMP-SR-NEXT: sw t4, 24(a5)
; RV32IZCMP-SR-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: sw a0, 20(a5)
; RV32IZCMP-SR-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: sw a0, 16(a5)
; RV32IZCMP-SR-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(a6)
; RV32IZCMP-SR-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(a6)
; RV32IZCMP-SR-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(a6)
; RV32IZCMP-SR-NEXT: lw a0, 28(sp) # 4-byte Folded Reload
; RV32IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(a6)
; RV32IZCMP-SR-NEXT: cm.popret {ra, s0-s11}, 96
;
; RV64IZCMP-SR-LABEL: callee_no_irq:
; RV64IZCMP-SR: # %bb.0:
; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s11}, -160
; RV64IZCMP-SR-NEXT: lui a6, %hi(var_test_irq)
; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq)(a6)
; RV64IZCMP-SR-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+4)(a6)
; RV64IZCMP-SR-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+8)(a6)
; RV64IZCMP-SR-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: lw a0, %lo(var_test_irq+12)(a6)
; RV64IZCMP-SR-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: addi a5, a6, %lo(var_test_irq)
; RV64IZCMP-SR-NEXT: lw a0, 16(a5)
; RV64IZCMP-SR-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: lw a0, 20(a5)
; RV64IZCMP-SR-NEXT: sd a0, 0(sp) # 8-byte Folded Spill
; RV64IZCMP-SR-NEXT: lw t4, 24(a5)
; RV64IZCMP-SR-NEXT: lw t5, 28(a5)
; RV64IZCMP-SR-NEXT: lw t6, 32(a5)
; RV64IZCMP-SR-NEXT: lw s2, 36(a5)
; RV64IZCMP-SR-NEXT: lw s3, 40(a5)
; RV64IZCMP-SR-NEXT: lw s4, 44(a5)
; RV64IZCMP-SR-NEXT: lw s5, 48(a5)
; RV64IZCMP-SR-NEXT: lw s6, 52(a5)
; RV64IZCMP-SR-NEXT: lw s7, 56(a5)
; RV64IZCMP-SR-NEXT: lw s8, 60(a5)
; RV64IZCMP-SR-NEXT: lw s9, 64(a5)
; RV64IZCMP-SR-NEXT: lw s10, 68(a5)
; RV64IZCMP-SR-NEXT: lw s11, 72(a5)
; RV64IZCMP-SR-NEXT: lw ra, 76(a5)
; RV64IZCMP-SR-NEXT: lw s1, 80(a5)
; RV64IZCMP-SR-NEXT: lw t3, 84(a5)
; RV64IZCMP-SR-NEXT: lw t2, 88(a5)
; RV64IZCMP-SR-NEXT: lw t1, 92(a5)
; RV64IZCMP-SR-NEXT: lw t0, 96(a5)
; RV64IZCMP-SR-NEXT: lw s0, 100(a5)
; RV64IZCMP-SR-NEXT: lw a7, 104(a5)
; RV64IZCMP-SR-NEXT: lw a4, 108(a5)
; RV64IZCMP-SR-NEXT: lw a0, 124(a5)
; RV64IZCMP-SR-NEXT: lw a1, 120(a5)
; RV64IZCMP-SR-NEXT: lw a2, 116(a5)
; RV64IZCMP-SR-NEXT: lw a3, 112(a5)
; RV64IZCMP-SR-NEXT: sw a0, 124(a5)
; RV64IZCMP-SR-NEXT: sw a1, 120(a5)
; RV64IZCMP-SR-NEXT: sw a2, 116(a5)
; RV64IZCMP-SR-NEXT: sw a3, 112(a5)
; RV64IZCMP-SR-NEXT: sw a4, 108(a5)
; RV64IZCMP-SR-NEXT: sw a7, 104(a5)
; RV64IZCMP-SR-NEXT: sw s0, 100(a5)
; RV64IZCMP-SR-NEXT: sw t0, 96(a5)
; RV64IZCMP-SR-NEXT: sw t1, 92(a5)
; RV64IZCMP-SR-NEXT: sw t2, 88(a5)
; RV64IZCMP-SR-NEXT: sw t3, 84(a5)
; RV64IZCMP-SR-NEXT: sw s1, 80(a5)
; RV64IZCMP-SR-NEXT: sw ra, 76(a5)
; RV64IZCMP-SR-NEXT: sw s11, 72(a5)
; RV64IZCMP-SR-NEXT: sw s10, 68(a5)
; RV64IZCMP-SR-NEXT: sw s9, 64(a5)
; RV64IZCMP-SR-NEXT: sw s8, 60(a5)
; RV64IZCMP-SR-NEXT: sw s7, 56(a5)
; RV64IZCMP-SR-NEXT: sw s6, 52(a5)
; RV64IZCMP-SR-NEXT: sw s5, 48(a5)
; RV64IZCMP-SR-NEXT: sw s4, 44(a5)
; RV64IZCMP-SR-NEXT: sw s3, 40(a5)
; RV64IZCMP-SR-NEXT: sw s2, 36(a5)
; RV64IZCMP-SR-NEXT: sw t6, 32(a5)
; RV64IZCMP-SR-NEXT: sw t5, 28(a5)
; RV64IZCMP-SR-NEXT: sw t4, 24(a5)
; RV64IZCMP-SR-NEXT: ld a0, 0(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: sw a0, 20(a5)
; RV64IZCMP-SR-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: sw a0, 16(a5)
; RV64IZCMP-SR-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+12)(a6)
; RV64IZCMP-SR-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+8)(a6)
; RV64IZCMP-SR-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq+4)(a6)
; RV64IZCMP-SR-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
; RV64IZCMP-SR-NEXT: sw a0, %lo(var_test_irq)(a6)
; RV64IZCMP-SR-NEXT: cm.popret {ra, s0-s11}, 160
;
; RV32I-LABEL: callee_no_irq:
; RV32I: # %bb.0:
; RV32I-NEXT: addi sp, sp, -80
; RV32I-NEXT: sw ra, 76(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 72(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 68(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s2, 64(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s3, 60(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s4, 56(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s5, 52(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s6, 48(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s7, 44(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s8, 40(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s9, 36(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s10, 32(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s11, 28(sp) # 4-byte Folded Spill
; RV32I-NEXT: lui a6, %hi(var_test_irq)
; RV32I-NEXT: lw a0, %lo(var_test_irq)(a6)
; RV32I-NEXT: sw a0, 24(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, %lo(var_test_irq+4)(a6)
; RV32I-NEXT: sw a0, 20(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, %lo(var_test_irq+8)(a6)
; RV32I-NEXT: sw a0, 16(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, %lo(var_test_irq+12)(a6)
; RV32I-NEXT: sw a0, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: addi a5, a6, %lo(var_test_irq)
; RV32I-NEXT: lw a0, 16(a5)
; RV32I-NEXT: sw a0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw a0, 20(a5)
; RV32I-NEXT: sw a0, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: lw t0, 24(a5)
; RV32I-NEXT: lw t1, 28(a5)
; RV32I-NEXT: lw t2, 32(a5)
; RV32I-NEXT: lw t3, 36(a5)
; RV32I-NEXT: lw t4, 40(a5)
; RV32I-NEXT: lw t5, 44(a5)
; RV32I-NEXT: lw t6, 48(a5)
; RV32I-NEXT: lw s0, 52(a5)
; RV32I-NEXT: lw s1, 56(a5)
; RV32I-NEXT: lw s2, 60(a5)
; RV32I-NEXT: lw s3, 64(a5)
; RV32I-NEXT: lw s4, 68(a5)
; RV32I-NEXT: lw s5, 72(a5)
; RV32I-NEXT: lw s6, 76(a5)
; RV32I-NEXT: lw s7, 80(a5)
; RV32I-NEXT: lw s8, 84(a5)
; RV32I-NEXT: lw s9, 88(a5)
; RV32I-NEXT: lw s10, 92(a5)
; RV32I-NEXT: lw s11, 96(a5)
; RV32I-NEXT: lw ra, 100(a5)
; RV32I-NEXT: lw a7, 104(a5)
; RV32I-NEXT: lw a4, 108(a5)
; RV32I-NEXT: lw a0, 124(a5)
; RV32I-NEXT: lw a1, 120(a5)
; RV32I-NEXT: lw a2, 116(a5)
; RV32I-NEXT: lw a3, 112(a5)
; RV32I-NEXT: sw a0, 124(a5)
; RV32I-NEXT: sw a1, 120(a5)
; RV32I-NEXT: sw a2, 116(a5)
; RV32I-NEXT: sw a3, 112(a5)
; RV32I-NEXT: sw a4, 108(a5)
; RV32I-NEXT: sw a7, 104(a5)
; RV32I-NEXT: sw ra, 100(a5)
; RV32I-NEXT: sw s11, 96(a5)
; RV32I-NEXT: sw s10, 92(a5)
; RV32I-NEXT: sw s9, 88(a5)
; RV32I-NEXT: sw s8, 84(a5)
; RV32I-NEXT: sw s7, 80(a5)
; RV32I-NEXT: sw s6, 76(a5)
; RV32I-NEXT: sw s5, 72(a5)
; RV32I-NEXT: sw s4, 68(a5)
; RV32I-NEXT: sw s3, 64(a5)
; RV32I-NEXT: sw s2, 60(a5)
; RV32I-NEXT: sw s1, 56(a5)
; RV32I-NEXT: sw s0, 52(a5)
; RV32I-NEXT: sw t6, 48(a5)
; RV32I-NEXT: sw t5, 44(a5)
; RV32I-NEXT: sw t4, 40(a5)
; RV32I-NEXT: sw t3, 36(a5)
; RV32I-NEXT: sw t2, 32(a5)
; RV32I-NEXT: sw t1, 28(a5)
; RV32I-NEXT: sw t0, 24(a5)
; RV32I-NEXT: lw a0, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: sw a0, 20(a5)
; RV32I-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: sw a0, 16(a5)
; RV32I-NEXT: lw a0, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: sw a0, %lo(var_test_irq+12)(a6)
; RV32I-NEXT: lw a0, 16(sp) # 4-byte Folded Reload
; RV32I-NEXT: sw a0, %lo(var_test_irq+8)(a6)
; RV32I-NEXT: lw a0, 20(sp) # 4-byte Folded Reload
; RV32I-NEXT: sw a0, %lo(var_test_irq+4)(a6)
; RV32I-NEXT: lw a0, 24(sp) # 4-byte Folded Reload
; RV32I-NEXT: sw a0, %lo(var_test_irq)(a6)
; RV32I-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 72(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 68(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s2, 64(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s3, 60(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s4, 56(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s5, 52(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s6, 48(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s7, 44(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s8, 40(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s9, 36(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s10, 32(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s11, 28(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 80
; RV32I-NEXT: ret
;
; RV64I-LABEL: callee_no_irq:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -160
; RV64I-NEXT: sd ra, 152(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 144(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 136(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s2, 128(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s3, 120(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s4, 112(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s5, 104(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s6, 96(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s7, 88(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s8, 80(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s9, 72(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s10, 64(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s11, 56(sp) # 8-byte Folded Spill
; RV64I-NEXT: lui a6, %hi(var_test_irq)
; RV64I-NEXT: lw a0, %lo(var_test_irq)(a6)
; RV64I-NEXT: sd a0, 48(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, %lo(var_test_irq+4)(a6)
; RV64I-NEXT: sd a0, 40(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, %lo(var_test_irq+8)(a6)
; RV64I-NEXT: sd a0, 32(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, %lo(var_test_irq+12)(a6)
; RV64I-NEXT: sd a0, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: addi a5, a6, %lo(var_test_irq)
; RV64I-NEXT: lw a0, 16(a5)
; RV64I-NEXT: sd a0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw a0, 20(a5)
; RV64I-NEXT: sd a0, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: lw t0, 24(a5)
; RV64I-NEXT: lw t1, 28(a5)
; RV64I-NEXT: lw t2, 32(a5)
; RV64I-NEXT: lw t3, 36(a5)
; RV64I-NEXT: lw t4, 40(a5)
; RV64I-NEXT: lw t5, 44(a5)
; RV64I-NEXT: lw t6, 48(a5)
; RV64I-NEXT: lw s0, 52(a5)
; RV64I-NEXT: lw s1, 56(a5)
; RV64I-NEXT: lw s2, 60(a5)
; RV64I-NEXT: lw s3, 64(a5)
; RV64I-NEXT: lw s4, 68(a5)
; RV64I-NEXT: lw s5, 72(a5)
; RV64I-NEXT: lw s6, 76(a5)
; RV64I-NEXT: lw s7, 80(a5)
; RV64I-NEXT: lw s8, 84(a5)
; RV64I-NEXT: lw s9, 88(a5)
; RV64I-NEXT: lw s10, 92(a5)
; RV64I-NEXT: lw s11, 96(a5)
; RV64I-NEXT: lw ra, 100(a5)
; RV64I-NEXT: lw a7, 104(a5)
; RV64I-NEXT: lw a4, 108(a5)
; RV64I-NEXT: lw a0, 124(a5)
; RV64I-NEXT: lw a1, 120(a5)
; RV64I-NEXT: lw a2, 116(a5)
; RV64I-NEXT: lw a3, 112(a5)
; RV64I-NEXT: sw a0, 124(a5)
; RV64I-NEXT: sw a1, 120(a5)
; RV64I-NEXT: sw a2, 116(a5)
; RV64I-NEXT: sw a3, 112(a5)
; RV64I-NEXT: sw a4, 108(a5)
; RV64I-NEXT: sw a7, 104(a5)
; RV64I-NEXT: sw ra, 100(a5)
; RV64I-NEXT: sw s11, 96(a5)
; RV64I-NEXT: sw s10, 92(a5)
; RV64I-NEXT: sw s9, 88(a5)
; RV64I-NEXT: sw s8, 84(a5)
; RV64I-NEXT: sw s7, 80(a5)
; RV64I-NEXT: sw s6, 76(a5)
; RV64I-NEXT: sw s5, 72(a5)
; RV64I-NEXT: sw s4, 68(a5)
; RV64I-NEXT: sw s3, 64(a5)
; RV64I-NEXT: sw s2, 60(a5)
; RV64I-NEXT: sw s1, 56(a5)
; RV64I-NEXT: sw s0, 52(a5)
; RV64I-NEXT: sw t6, 48(a5)
; RV64I-NEXT: sw t5, 44(a5)
; RV64I-NEXT: sw t4, 40(a5)
; RV64I-NEXT: sw t3, 36(a5)
; RV64I-NEXT: sw t2, 32(a5)
; RV64I-NEXT: sw t1, 28(a5)
; RV64I-NEXT: sw t0, 24(a5)
; RV64I-NEXT: ld a0, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: sw a0, 20(a5)
; RV64I-NEXT: ld a0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: sw a0, 16(a5)
; RV64I-NEXT: ld a0, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: sw a0, %lo(var_test_irq+12)(a6)
; RV64I-NEXT: ld a0, 32(sp) # 8-byte Folded Reload
; RV64I-NEXT: sw a0, %lo(var_test_irq+8)(a6)
; RV64I-NEXT: ld a0, 40(sp) # 8-byte Folded Reload
; RV64I-NEXT: sw a0, %lo(var_test_irq+4)(a6)
; RV64I-NEXT: ld a0, 48(sp) # 8-byte Folded Reload
; RV64I-NEXT: sw a0, %lo(var_test_irq)(a6)
; RV64I-NEXT: ld ra, 152(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 144(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 136(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s2, 128(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s3, 120(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s4, 112(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s5, 104(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s6, 96(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s7, 88(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s8, 80(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s9, 72(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s10, 64(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s11, 56(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 160
; RV64I-NEXT: ret
%val = load [32 x i32], [32 x i32]* @var_test_irq
store volatile [32 x i32] %val, [32 x i32]* @var_test_irq
ret void
}

declare void @bar(ptr, ptr)
declare ptr @llvm.frameaddress.p0(i32 immarg)
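
; use_fp takes its own frame address, so a frame pointer is required: the Zcmp
; configurations still save s0 via cm.push/cm.popret, set it up from sp, and
; address the local alloca relative to s0.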
define i32 @use_fp(i32 %x) {
; RV32IZCMP-LABEL: use_fp:
; RV32IZCMP: # %bb.0: # %entry
; RV32IZCMP-NEXT: cm.push {ra, s0-s1}, -32
; RV32IZCMP-NEXT: .cfi_def_cfa_offset 32
; RV32IZCMP-NEXT: .cfi_offset ra, -12
; RV32IZCMP-NEXT: .cfi_offset s0, -8
; RV32IZCMP-NEXT: .cfi_offset s1, -4
; RV32IZCMP-NEXT: addi s0, sp, 32
; RV32IZCMP-NEXT: .cfi_def_cfa s0, 0
; RV32IZCMP-NEXT: mv s1, a0
; RV32IZCMP-NEXT: addi a1, s0, -20
; RV32IZCMP-NEXT: mv a0, s0
; RV32IZCMP-NEXT: call bar
; RV32IZCMP-NEXT: mv a0, s1
; RV32IZCMP-NEXT: cm.popret {ra, s0-s1}, 32
;
; RV64IZCMP-LABEL: use_fp:
; RV64IZCMP: # %bb.0: # %entry
; RV64IZCMP-NEXT: cm.push {ra, s0-s1}, -48
; RV64IZCMP-NEXT: .cfi_def_cfa_offset 48
; RV64IZCMP-NEXT: .cfi_offset ra, -24
; RV64IZCMP-NEXT: .cfi_offset s0, -16
; RV64IZCMP-NEXT: .cfi_offset s1, -8
; RV64IZCMP-NEXT: addi s0, sp, 48
; RV64IZCMP-NEXT: .cfi_def_cfa s0, 0
; RV64IZCMP-NEXT: mv s1, a0
; RV64IZCMP-NEXT: addi a1, s0, -36
; RV64IZCMP-NEXT: mv a0, s0
; RV64IZCMP-NEXT: call bar
; RV64IZCMP-NEXT: mv a0, s1
; RV64IZCMP-NEXT: cm.popret {ra, s0-s1}, 48
;
; RV32IZCMP-SR-LABEL: use_fp:
; RV32IZCMP-SR: # %bb.0: # %entry
; RV32IZCMP-SR-NEXT: cm.push {ra, s0-s1}, -32
; RV32IZCMP-SR-NEXT: .cfi_def_cfa_offset 32
; RV32IZCMP-SR-NEXT: .cfi_offset ra, -12
; RV32IZCMP-SR-NEXT: .cfi_offset s0, -8
; RV32IZCMP-SR-NEXT: .cfi_offset s1, -4
; RV32IZCMP-SR-NEXT: addi s0, sp, 32
; RV32IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV32IZCMP-SR-NEXT: mv s1, a0
; RV32IZCMP-SR-NEXT: addi a1, s0, -20
; RV32IZCMP-SR-NEXT: mv a0, s0
; RV32IZCMP-SR-NEXT: call bar
; RV32IZCMP-SR-NEXT: mv a0, s1
; RV32IZCMP-SR-NEXT: cm.popret {ra, s0-s1}, 32
;
; RV64IZCMP-SR-LABEL: use_fp:
; RV64IZCMP-SR: # %bb.0: # %entry
; RV64IZCMP-SR-NEXT: cm.push {ra, s0-s1}, -48
; RV64IZCMP-SR-NEXT: .cfi_def_cfa_offset 48
; RV64IZCMP-SR-NEXT: .cfi_offset ra, -24
; RV64IZCMP-SR-NEXT: .cfi_offset s0, -16
; RV64IZCMP-SR-NEXT: .cfi_offset s1, -8
; RV64IZCMP-SR-NEXT: addi s0, sp, 48
; RV64IZCMP-SR-NEXT: .cfi_def_cfa s0, 0
; RV64IZCMP-SR-NEXT: mv s1, a0
; RV64IZCMP-SR-NEXT: addi a1, s0, -36
; RV64IZCMP-SR-NEXT: mv a0, s0
; RV64IZCMP-SR-NEXT: call bar
; RV64IZCMP-SR-NEXT: mv a0, s1
; RV64IZCMP-SR-NEXT: cm.popret {ra, s0-s1}, 48
;
; RV32I-LABEL: use_fp:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: .cfi_def_cfa_offset 16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-NEXT: sw s1, 4(sp) # 4-byte Folded Spill
; RV32I-NEXT: .cfi_offset ra, -4
; RV32I-NEXT: .cfi_offset s0, -8
; RV32I-NEXT: .cfi_offset s1, -12
; RV32I-NEXT: addi s0, sp, 16
; RV32I-NEXT: .cfi_def_cfa s0, 0
; RV32I-NEXT: mv s1, a0
; RV32I-NEXT: addi a1, s0, -16
; RV32I-NEXT: mv a0, s0
; RV32I-NEXT: call bar
; RV32I-NEXT: mv a0, s1
; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-NEXT: lw s1, 4(sp) # 4-byte Folded Reload
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: use_fp:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: addi sp, sp, -32
; RV64I-NEXT: .cfi_def_cfa_offset 32
; RV64I-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
; RV64I-NEXT: sd s1, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
; RV64I-NEXT: .cfi_offset s0, -16
; RV64I-NEXT: .cfi_offset s1, -24
; RV64I-NEXT: addi s0, sp, 32
; RV64I-NEXT: .cfi_def_cfa s0, 0
; RV64I-NEXT: mv s1, a0
; RV64I-NEXT: addi a1, s0, -28
; RV64I-NEXT: mv a0, s0
; RV64I-NEXT: call bar
; RV64I-NEXT: mv a0, s1
; RV64I-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; RV64I-NEXT: ld s1, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 32
; RV64I-NEXT: ret
entry:
%var = alloca i32, align 4
%0 = tail call ptr @llvm.frameaddress.p0(i32 0)
call void @bar(ptr %0, ptr %var)