; RUN: llc < %s --mtriple=wasm32-unknown-unknown -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers | FileCheck -DPTR=32 %s
; RUN: llc < %s --mtriple=wasm64-unknown-unknown -asm-verbose=false -disable-wasm-fallthrough-return-opt -wasm-keep-registers | FileCheck -DPTR=64 %s

declare void @ext_func(i64* %ptr)
declare void @ext_func_i32(i32* %ptr)

; CHECK: .globaltype __stack_pointer, i[[PTR]]{{$}}
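
; As a rough illustration only (register names and offsets here are not
; checked by FileCheck), the static-frame setup and teardown that the
; alloca32 checks below expect looks like:
;   global.get $sp=, __stack_pointer   # read the current stack pointer
;   i32.sub    $sp=, $sp, 16           # carve out a 16-byte aligned frame
;   global.set __stack_pointer, $sp    # publish the new stack pointer
;   ...                                # frame slots addressed as offsets from $sp
;   i32.add    $sp=, $sp, 16           # epilogue: release the frame
;   global.set __stack_pointer, $sp
; (i64 operations instead of i32 when targeting wasm64.)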
; CHECK-LABEL: alloca32:
; Check that there is an extra local for the stack pointer.
; CHECK: .local i[[PTR]]{{$}}
define void @alloca32() noredzone {
; CHECK-NEXT: global.get $push[[L2:.+]]=, __stack_pointer{{$}}
; CHECK-NEXT: i[[PTR]].const $push[[L3:.+]]=, 16
; CHECK-NEXT: i[[PTR]].sub $push[[L9:.+]]=, $pop[[L2]], $pop[[L3]]
; CHECK-NEXT: local.tee $push[[L8:.+]]=, [[SP:.+]], $pop[[L9]]{{$}}
; CHECK-NEXT: global.set __stack_pointer, $pop[[L8]]{{$}}
%retval = alloca i32
; CHECK: local.get $push[[L4:.+]]=, [[SP]]{{$}}
; CHECK: i32.const $push[[L0:.+]]=, 0
; CHECK: i32.store 12($pop[[L4]]), $pop[[L0]]
store i32 0, i32* %retval
; CHECK: local.get $push[[L6:.+]]=, [[SP]]{{$}}
; CHECK-NEXT: i[[PTR]].const $push[[L5:.+]]=, 16
; CHECK-NEXT: i[[PTR]].add $push[[L7:.+]]=, $pop[[L6]], $pop[[L5]]
; CHECK-NEXT: global.set __stack_pointer, $pop[[L7]]
ret void
}
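
; Unlike alloca32 above, alloca3264 is not marked noredzone, so the checks
; below expect the frame to stay in the red zone: the stack pointer is read
; and decremented into a local, but it is never written back (no global.set
; in the prologue) and there is no epilogue add.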
; CHECK-LABEL: alloca3264:
; CHECK: .local i[[PTR]]{{$}}
define void @alloca3264() {
; CHECK: global.get $push[[L3:.+]]=, __stack_pointer{{$}}
; CHECK-NEXT: i[[PTR]].const $push[[L4:.+]]=, 16
; CHECK-NEXT: i[[PTR]].sub $push[[L6:.+]]=, $pop[[L3]], $pop[[L4]]
; CHECK-NEXT: local.tee $push[[L5:.+]]=, [[SP:.+]], $pop[[L6]]
%r1 = alloca i32
%r2 = alloca double
store i32 0, i32* %r1
store double 0.0, double* %r2
; CHECK-NEXT: i64.const $push[[L1:.+]]=, 0
; CHECK-NEXT: i64.store 0($pop[[L5]]), $pop[[L1]]
; CHECK-NEXT: local.get $push[[L2:.+]]=, [[SP]]{{$}}
; CHECK-NEXT: i32.const $push[[L0:.+]]=, 0
; CHECK-NEXT: i32.store 12($pop[[L2]]), $pop[[L0]]
ret void
}
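
; Frame-size arithmetic for the next test (informational, not verified by
; FileCheck): [33 x i32] needs 33 * 4 = 132 bytes, which rounds up to 144
; under the 16-byte stack alignment, matching the constant in the prologue
; and epilogue checks below.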
; CHECK-LABEL: allocarray:
; CHECK: .local i[[PTR]]{{$}}
define void @allocarray() {
; CHECK-NEXT: global.get $push[[L4:.+]]=, __stack_pointer{{$}}
; CHECK-NEXT: i[[PTR]].const $push[[L5:.+]]=, 144{{$}}
; CHECK-NEXT: i[[PTR]].sub $push[[L12:.+]]=, $pop[[L4]], $pop[[L5]]
; CHECK-NEXT: local.tee $push[[L11:.+]]=, 0, $pop[[L12]]
; CHECK-NEXT: global.set __stack_pointer, $pop[[L11]]
%r = alloca [33 x i32]

; CHECK: i[[PTR]].const $push{{.+}}=, 24
; CHECK-NEXT: i[[PTR]].add $push[[L3:.+]]=, $pop{{.+}}, $pop{{.+}}
; CHECK-NEXT: i32.const $push[[L1:.+]]=, 1{{$}}
; CHECK-NEXT: i32.store 0($pop[[L3]]), $pop[[L1]]{{$}}
; CHECK-NEXT: local.get $push[[L4:.+]]=, 0{{$}}
; CHECK-NEXT: i32.const $push[[L10:.+]]=, 1{{$}}
; CHECK-NEXT: i32.store 12($pop[[L4]]), $pop[[L10]]{{$}}
%p = getelementptr [33 x i32], [33 x i32]* %r, i32 0, i32 0
store i32 1, i32* %p
%p2 = getelementptr [33 x i32], [33 x i32]* %r, i32 0, i32 3
store i32 1, i32* %p2

; CHECK-NEXT: local.get $push[[L2:.+]]=, [[SP]]{{$}}
; CHECK-NEXT: i[[PTR]].const $push[[L7:.+]]=, 144
; CHECK-NEXT: i[[PTR]].add $push[[L8:.+]]=, $pop[[L2]], $pop[[L7]]
; CHECK-NEXT: global.set __stack_pointer, $pop[[L8]]
ret void
}
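
; Expected frame layout for the next test (informational; only the offsets
; that appear in the checks are verified): a 48-byte frame with %r2 at
; offset 0, %r at offset 8, and the 16-byte-aligned %buf at offset 16.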
; CHECK-LABEL: non_mem_use
define void @non_mem_use(i8** %addr) {
; CHECK: i[[PTR]].const $push[[L2:.+]]=, 48
; CHECK-NEXT: i[[PTR]].sub $push[[L12:.+]]=, {{.+}}, $pop[[L2]]
; CHECK-NEXT: local.tee $push[[L11:.+]]=, [[SP:.+]], $pop[[L12]]
; CHECK-NEXT: global.set __stack_pointer, $pop[[L11]]
%buf = alloca [27 x i8], align 16
%r = alloca i64
%r2 = alloca i64
; %r is at SP+8, so an add is needed.
; CHECK: local.get $push[[L3:.+]]=, [[SP]]
; CHECK: i[[PTR]].const $push[[OFF:.+]]=, 8
; CHECK-NEXT: i[[PTR]].add $push[[ARG1:.+]]=, $pop[[L3]], $pop[[OFF]]
; CHECK-NEXT: call ext_func, $pop[[ARG1]]
call void @ext_func(i64* %r)
; %r2 is at SP+0, no add needed.
; CHECK: local.get $push[[L4:.+]]=, [[SP]]
; CHECK-NEXT: call ext_func, $pop[[L4]]
call void @ext_func(i64* %r2)
; Use the frame address as a value rather than as a memory operand: it is
; stored to %addr, so it must be fully materialized.
; CHECK: local.get $push[[L5:.+]]=, [[SP]]
; CHECK: i[[PTR]].const $push[[OFF:.+]]=, 16
; CHECK-NEXT: i[[PTR]].add $push[[VAL:.+]]=, $pop[[L5]], $pop[[OFF]]
; CHECK-NEXT: i[[PTR]].store 0($pop{{.+}}), $pop[[VAL]]
%gep = getelementptr inbounds [27 x i8], [27 x i8]* %buf, i32 0, i32 0
store i8* %gep, i8** %addr
ret void
}
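
; Offset arithmetic for the next test (informational): [5 x i32] is 20 bytes,
; rounded up to a 32-byte frame. With inbounds GEPs both the frame index and
; the GEP offset can be folded into the store's offset field, so element 0 is
; stored at offset 12 and element 3 at offset 12 + 3 * 4 = 24.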
; CHECK-LABEL: allocarray_inbounds:
; CHECK: .local i[[PTR]]{{$}}
define void @allocarray_inbounds() {
; CHECK: global.get $push[[L3:.+]]=, __stack_pointer{{$}}
; CHECK-NEXT: i[[PTR]].const $push[[L4:.+]]=, 32{{$}}
; CHECK-NEXT: i[[PTR]].sub $push[[L11:.+]]=, $pop[[L3]], $pop[[L4]]
; CHECK-NEXT: local.tee $push[[L10:.+]]=, [[SP:.+]], $pop[[L11]]
; CHECK-NEXT: global.set __stack_pointer, $pop[[L10]]{{$}}
%r = alloca [5 x i32]
; CHECK: i32.const $push[[L3:.+]]=, 1
; CHECK-DAG: i32.store 24(${{.+}}), $pop[[L3]]
%p = getelementptr inbounds [5 x i32], [5 x i32]* %r, i32 0, i32 0
store i32 1, i32* %p
; This store should have both the GEP and the FI folded into it.
; CHECK-DAG: i32.store 12(${{.+}}), $pop
%p2 = getelementptr inbounds [5 x i32], [5 x i32]* %r, i32 0, i32 3
store i32 1, i32* %p2
call void @ext_func(i64* null)
; CHECK: call ext_func
; CHECK: i[[PTR]].const $push[[L5:.+]]=, 32{{$}}
; CHECK-NEXT: i[[PTR]].add $push[[L7:.+]]=, ${{.+}}, $pop[[L5]]
; CHECK-NEXT: global.set __stack_pointer, $pop[[L7]]
ret void
}
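
; For dynamic allocas the frame size is not a compile-time constant, so the
; checks in the next tests only pin down the shape of the code. Illustrative
; sequence (the $fp/$sp/$size names are placeholders, not checked):
;   global.get $fp=, __stack_pointer   # capture SP as the frame pointer
;   ...                                # compute $size from %alloc
;   i32.sub    $sp=, $fp, $size        # bump SP down by the dynamic amount
;   global.set __stack_pointer, $sp    # write the new SP back
;   ...
;   global.set __stack_pointer, $fp    # epilogue: restore the saved SP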
; CHECK-LABEL: dynamic_alloca:
define void @dynamic_alloca(i32 %alloc) {
; CHECK: global.get $push[[L13:.+]]=, __stack_pointer{{$}}
; CHECK-NEXT: local.tee $push[[L12:.+]]=, [[SP:.+]], $pop[[L13]]{{$}}
; Target-independent codegen bumps the stack pointer.
; CHECK: i[[PTR]].sub
; Check that SP is written back to memory after the decrement.
; CHECK: global.set __stack_pointer,
%r = alloca i32, i32 %alloc
; Target-independent codegen also calculates the address passed to the call.
; CHECK: call ext_func_i32
call void @ext_func_i32(i32* %r)
; CHECK: global.set __stack_pointer, $pop{{.+}}
ret void
}
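
; The next test has no call, so the dynamically sized area can live in the
; red zone below __stack_pointer: note that the checks expect the
; i[[PTR]].sub but no global.set publishing the decremented value.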
; CHECK-LABEL: dynamic_alloca_redzone:
define void @dynamic_alloca_redzone(i32 %alloc) {
; CHECK: global.get $push[[L13:.+]]=, __stack_pointer{{$}}
; CHECK-NEXT: local.tee $push[[L12:.+]]=, [[SP:.+]], $pop[[L13]]{{$}}
; Target-independent codegen bumps the stack pointer.
; CHECK: i[[PTR]].sub
%r = alloca i32, i32 %alloc
; CHECK-NEXT: local.tee $push[[L8:.+]]=, [[SP2:.+]], $pop
; CHECK: local.get $push[[L7:.+]]=, [[SP2]]{{$}}
; CHECK-NEXT: i32.const $push[[L6:.+]]=, 0{{$}}
; CHECK-NEXT: i32.store 0($pop[[L7]]), $pop[[L6]]{{$}}
store i32 0, i32* %r
ret void
}
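
; The next test mixes a fixed-size alloca with two dynamic ones. The static
; part is carved out once in the prologue and addressed at a fixed offset
; from a frame-pointer local, while each dynamic alloca decrements SP again
; in the body; the checks verify that the frame-pointer local is never
; reassigned, so the static slot's address stays stable across both bumps.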
; CHECK-LABEL: dynamic_static_alloca:
define void @dynamic_static_alloca(i32 %alloc) noredzone {
; Decrement SP in the prologue by the static amount and write it back to memory.
; CHECK: global.get $push[[L11:.+]]=, __stack_pointer{{$}}
; CHECK-NEXT: i[[PTR]].const $push[[L12:.+]]=, 16
; CHECK-NEXT: i[[PTR]].sub $push[[L23:.+]]=, $pop[[L11]], $pop[[L12]]
; CHECK-NEXT: local.tee $push[[L22:.+]]=, [[SP:.+]], $pop[[L23]]
; CHECK-NEXT: global.set __stack_pointer, $pop[[L22]]

; Allocate and write to a static alloca.
; CHECK: local.get $push[[L21:.+]]=, [[SP:.+]]
; CHECK-NEXT: local.tee $push[[pushedFP:.+]]=, [[FP:.+]], $pop[[L21]]
; CHECK-NEXT: i32.const $push[[L0:.+]]=, 101
; CHECK-NEXT: i32.store [[static_offset:.+]]($pop[[pushedFP]]), $pop[[L0]]
%static = alloca i32
store volatile i32 101, i32* %static

; Decrement SP in the body by the dynamic amount.
; CHECK: i[[PTR]].sub
; CHECK: local.tee $push[[L16:.+]]=, [[dynamic_local:.+]], $pop{{.+}}
; CHECK: local.tee $push[[L15:.+]]=, [[other:.+]], $pop[[L16]]{{$}}
; CHECK: global.set __stack_pointer, $pop[[L15]]{{$}}
%dynamic = alloca i32, i32 %alloc

; Ensure we don't modify the frame pointer after assigning it.
; CHECK-NOT: $[[FP]]=

; Ensure the static address doesn't change after modifying the stack pointer.
; CHECK: local.get $push[[L17:.+]]=, [[FP]]
; CHECK: i32.const $push[[L7:.+]]=, 102
; CHECK-NEXT: i32.store [[static_offset]]($pop[[L17]]), $pop[[L7]]
; CHECK-NEXT: local.get $push[[L9:.+]]=, [[dynamic_local]]{{$}}
; CHECK-NEXT: i32.const $push[[L8:.+]]=, 103
; CHECK-NEXT: i32.store 0($pop[[L9]]), $pop[[L8]]
store volatile i32 102, i32* %static
store volatile i32 103, i32* %dynamic

; Decrement SP in the body by the second dynamic amount.
; CHECK: i[[PTR]].sub
; CHECK: local.tee $push{{.+}}=, [[dynamic2_local:.+]], $pop{{.+}}
%dynamic.2 = alloca i32, i32 %alloc

; CHECK-NOT: $[[FP]]=

; Ensure neither the static nor the dynamic address changes after the second
; modification of the stack pointer.
; CHECK: local.get $push[[L22:.+]]=, [[FP]]
; CHECK: i32.const $push[[L9:.+]]=, 104
; CHECK-NEXT: i32.store [[static_offset]]($pop[[L22]]), $pop[[L9]]
; CHECK-NEXT: local.get $push[[L23:.+]]=, [[dynamic_local]]
; CHECK-NEXT: i32.const $push[[L10:.+]]=, 105
; CHECK-NEXT: i32.store 0($pop[[L23]]), $pop[[L10]]
; CHECK-NEXT: local.get $push[[L23:.+]]=, [[dynamic2_local]]
; CHECK-NEXT: i32.const $push[[L11:.+]]=, 106
; CHECK-NEXT: i32.store 0($pop[[L23]]), $pop[[L11]]
store volatile i32 104, i32* %static
store volatile i32 105, i32* %dynamic
store volatile i32 106, i32* %dynamic.2

; Restore SP in the epilogue and write it back to memory.
; CHECK: local.get $push[[L24:.+]]=, [[FP]]{{$}}
; CHECK: i[[PTR]].const $push[[L18:.+]]=, 16
; CHECK-NEXT: i[[PTR]].add $push[[L19:.+]]=, $pop[[L24]], $pop[[L18]]
; CHECK-NEXT: global.set __stack_pointer, $pop[[L19]]
ret void
}
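
; llvm.stacksave reads __stack_pointer into a local and llvm.stackrestore
; writes that local back, which is the pattern the checks below expect
; (global.get ... local.set, then local.get ... global.set).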
declare i8* @llvm.stacksave()
declare void @llvm.stackrestore(i8*)

; CHECK-LABEL: llvm_stack_builtins:
define void @llvm_stack_builtins(i32 %alloc) noredzone {
; CHECK: global.get $push[[L11:.+]]=, __stack_pointer{{$}}
; CHECK-NEXT: local.tee $push[[L10:.+]]=, {{.+}}, $pop[[L11]]
; CHECK-NEXT: local.set [[STACK:.+]], $pop[[L10]]
%stack = call i8* @llvm.stacksave()

; Ensure we don't reassign the stacksave local.
; CHECK-NOT: local.set [[STACK]],
%dynamic = alloca i32, i32 %alloc

; CHECK: local.get $push[[L12:.+]]=, [[STACK]]
; CHECK-NEXT: global.set __stack_pointer, $pop[[L12]]
call void @llvm.stackrestore(i8* %stack)
ret void
}
; Not actually using the alloca'd variables exposed an issue with register
; stackification, where copying the stack pointer into the frame pointer was
; moved after the stack pointer was updated for the dynamic alloca.
; CHECK-LABEL: dynamic_alloca_nouse:
define void @dynamic_alloca_nouse(i32 %alloc) noredzone {
; CHECK: global.get $push[[L11:.+]]=, __stack_pointer{{$}}
; CHECK-NEXT: local.tee $push[[L10:.+]]=, {{.+}}, $pop[[L11]]
; CHECK-NEXT: local.set [[FP:.+]], $pop[[L10]]
%dynamic = alloca i32, i32 %alloc

; CHECK-NOT: local.set [[FP]],

; CHECK: local.get $push[[L12:.+]]=, [[FP]]
; CHECK-NEXT: global.set __stack_pointer, $pop[[L12]]
ret void
}
; The use of the alloca in a phi causes a CopyToReg DAG node to be generated,
; which needs special handling because CopyToReg can't have a frame-index (FI)
; operand.
; CHECK-LABEL: copytoreg_fi:
define void @copytoreg_fi(i1 %cond, i32* %b) {
entry:
; CHECK: i[[PTR]].const $push[[L1:.+]]=, 16
; CHECK-NEXT: i[[PTR]].sub $push[[L3:.+]]=, {{.+}}, $pop[[L1]]
%addr = alloca i32
; CHECK: i[[PTR]].const $push[[OFF:.+]]=, 12
; CHECK-NEXT: i[[PTR]].add $push[[ADDR:.+]]=, $pop[[L3]], $pop[[OFF]]
; CHECK-NEXT: local.set [[COPY:.+]], $pop[[ADDR]]
br label %body
body:
%a = phi i32* [%addr, %entry], [%b, %body]
store i32 1, i32* %a
; CHECK: local.get $push[[L12:.+]]=, [[COPY]]
; CHECK: i32.store 0($pop[[L12]]),
br i1 %cond, label %body, label %exit
exit:
ret void
}
declare void @use_i8_star(i8*)
declare i8* @llvm.frameaddress(i32)
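
; __builtin_frame_address(0) is expected to lower to the value of
; __stack_pointer captured at function entry; the checks below verify that
; this captured value is what gets passed to use_i8_star and then restored.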
; Test __builtin_frame_address(0).
; CHECK-LABEL: frameaddress_0:
; CHECK: global.get $push[[L3:.+]]=, __stack_pointer{{$}}
; CHECK-NEXT: local.tee $push[[L2:.+]]=, [[FP:.+]], $pop[[L3]]{{$}}
; CHECK-NEXT: call use_i8_star, $pop[[L2]]
; CHECK-NEXT: local.get $push[[L5:.+]]=, [[FP]]
; CHECK-NEXT: global.set __stack_pointer, $pop[[L5]]
define void @frameaddress_0() {
%t = call i8* @llvm.frameaddress(i32 0)
call void @use_i8_star(i8* %t)
ret void
}
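
; Non-zero frame levels are not supported on this target, so the expectation
; below is simply that a constant 0 (a null frame address) is produced.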
; Test __builtin_frame_address(1).
; CHECK-LABEL: frameaddress_1:
; CHECK: i[[PTR]].const $push0=, 0{{$}}
; CHECK-NEXT: call use_i8_star, $pop0{{$}}
; CHECK-NEXT: return{{$}}
define void @frameaddress_1() {
%t = call i8* @llvm.frameaddress(i32 1)
call void @use_i8_star(i8* %t)
ret void
}
; Test a stack address passed to an inline asm.
; CHECK-LABEL: inline_asm:
; CHECK: global.get {{.+}}, __stack_pointer{{$}}
; CHECK: #APP
; CHECK-NEXT: # %{{[0-9]+}}{{$}}
; CHECK-NEXT: #NO_APP
define void @inline_asm() {
%tmp = alloca i8
call void asm sideeffect "# %0", "r"(i8* %tmp)
ret void
}
; We optimize "frame offset + operand" by folding the operand into the frame
; offset, but this is only possible when that operand is an immediate. In this
; example it is a global address, so it must not be folded.
; CHECK-LABEL: frame_offset_with_global_address
; CHECK: i[[PTR]].const ${{.*}}=, str
@str = local_unnamed_addr global [3 x i8] c"abc", align 16
define i8 @frame_offset_with_global_address() {
%1 = alloca i8, align 4
%2 = ptrtoint i8* %1 to i32
;; Here @str is a global address, not an immediate, so it cannot be folded.
%3 = getelementptr [3 x i8], [3 x i8]* @str, i32 0, i32 %2
%4 = load i8, i8* %3, align 8
ret i8 %4
}
; TODO: test over-aligned alloca