; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1030 < %s | FileCheck -check-prefixes=GCN,WAVE32,WAVE32-OPT %s
; RUN: llc -mtriple=amdgcn-amd-amdpal -mcpu=gfx1030 -mattr=+wavefrontsize64 < %s | FileCheck -check-prefixes=GCN,WAVE64,WAVE64-OPT %s

; RUN: llc -O0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1030 < %s | FileCheck -check-prefixes=GCN,WAVE32,WAVE32-O0 %s
; RUN: llc -O0 -mtriple=amdgcn-amd-amdpal -mcpu=gfx1030 -mattr=+wavefrontsize64 < %s | FileCheck -check-prefixes=GCN,WAVE64,WAVE64-O0 %s

declare ptr addrspace(5) @llvm.stacksave.p5()
declare void @llvm.stackrestore.p5(ptr addrspace(5))

define hidden void @stack_passed_argument([32 x i32], i32) {
; GCN-LABEL: stack_passed_argument:
; GCN: ; %bb.0:
; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GCN-NEXT: s_setpc_b64 s[30:31]
  ret void
}

define void @func_store_stacksave() {
; WAVE32-OPT-LABEL: func_store_stacksave:
; WAVE32-OPT: ; %bb.0:
; WAVE32-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-OPT-NEXT: s_lshr_b32 s4, s32, 5
; WAVE32-OPT-NEXT: ;;#ASMSTART
; WAVE32-OPT-NEXT: ; use s4
; WAVE32-OPT-NEXT: ;;#ASMEND
; WAVE32-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-OPT-LABEL: func_store_stacksave:
; WAVE64-OPT: ; %bb.0:
; WAVE64-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-OPT-NEXT: s_lshr_b32 s4, s32, 6
; WAVE64-OPT-NEXT: ;;#ASMSTART
; WAVE64-OPT-NEXT: ; use s4
; WAVE64-OPT-NEXT: ;;#ASMEND
; WAVE64-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE32-O0-LABEL: func_store_stacksave:
; WAVE32-O0: ; %bb.0:
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-O0-NEXT: s_mov_b32 s4, s32
; WAVE32-O0-NEXT: s_lshr_b32 s4, s4, 5
; WAVE32-O0-NEXT: ;;#ASMSTART
; WAVE32-O0-NEXT: ; use s4
; WAVE32-O0-NEXT: ;;#ASMEND
; WAVE32-O0-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-O0-LABEL: func_store_stacksave:
; WAVE64-O0: ; %bb.0:
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-O0-NEXT: s_mov_b32 s4, s32
; WAVE64-O0-NEXT: s_lshr_b32 s4, s4, 6
; WAVE64-O0-NEXT: ;;#ASMSTART
; WAVE64-O0-NEXT: ; use s4
; WAVE64-O0-NEXT: ;;#ASMEND
; WAVE64-O0-NEXT: s_setpc_b64 s[30:31]
  %stacksave = call ptr addrspace(5) @llvm.stacksave.p5()
  call void asm sideeffect "; use $0", "s"(ptr addrspace(5) %stacksave)
  ret void
}

define amdgpu_kernel void @kernel_store_stacksave() {
; WAVE32-OPT-LABEL: kernel_store_stacksave:
; WAVE32-OPT: ; %bb.0:
; WAVE32-OPT-NEXT: s_lshr_b32 s0, s32, 5
; WAVE32-OPT-NEXT: ;;#ASMSTART
; WAVE32-OPT-NEXT: ; use s0
; WAVE32-OPT-NEXT: ;;#ASMEND
; WAVE32-OPT-NEXT: s_endpgm
;
; WAVE64-OPT-LABEL: kernel_store_stacksave:
; WAVE64-OPT: ; %bb.0:
; WAVE64-OPT-NEXT: s_lshr_b32 s0, s32, 6
; WAVE64-OPT-NEXT: ;;#ASMSTART
; WAVE64-OPT-NEXT: ; use s0
; WAVE64-OPT-NEXT: ;;#ASMEND
; WAVE64-OPT-NEXT: s_endpgm
;
; WAVE32-O0-LABEL: kernel_store_stacksave:
; WAVE32-O0: ; %bb.0:
; WAVE32-O0-NEXT: s_mov_b32 s0, s32
; WAVE32-O0-NEXT: s_lshr_b32 s0, s0, 5
; WAVE32-O0-NEXT: ;;#ASMSTART
; WAVE32-O0-NEXT: ; use s0
; WAVE32-O0-NEXT: ;;#ASMEND
; WAVE32-O0-NEXT: s_endpgm
;
; WAVE64-O0-LABEL: kernel_store_stacksave:
; WAVE64-O0: ; %bb.0:
; WAVE64-O0-NEXT: s_mov_b32 s0, s32
; WAVE64-O0-NEXT: s_lshr_b32 s0, s0, 6
; WAVE64-O0-NEXT: ;;#ASMSTART
; WAVE64-O0-NEXT: ; use s0
; WAVE64-O0-NEXT: ;;#ASMEND
; WAVE64-O0-NEXT: s_endpgm
  %stacksave = call ptr addrspace(5) @llvm.stacksave.p5()
  call void asm sideeffect "; use $0", "s"(ptr addrspace(5) %stacksave)
  ret void
}

define amdgpu_kernel void @kernel_store_stacksave_nocall() {
; WAVE32-OPT-LABEL: kernel_store_stacksave_nocall:
; WAVE32-OPT: ; %bb.0:
; WAVE32-OPT-NEXT: s_getpc_b64 s[4:5]
; WAVE32-OPT-NEXT: s_mov_b32 s4, s0
; WAVE32-OPT-NEXT: v_mov_b32_e32 v0, 0
; WAVE32-OPT-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
; WAVE32-OPT-NEXT: s_waitcnt lgkmcnt(0)
; WAVE32-OPT-NEXT: s_bitset0_b32 s7, 21
; WAVE32-OPT-NEXT: s_add_u32 s4, s4, s1
; WAVE32-OPT-NEXT: s_addc_u32 s5, s5, 0
; WAVE32-OPT-NEXT: s_lshr_b32 s0, s32, 5
; WAVE32-OPT-NEXT: v_mov_b32_e32 v1, s0
; WAVE32-OPT-NEXT: buffer_store_dword v0, v1, s[4:7], 0 offen
; WAVE32-OPT-NEXT: s_endpgm
;
; WAVE64-OPT-LABEL: kernel_store_stacksave_nocall:
; WAVE64-OPT: ; %bb.0:
; WAVE64-OPT-NEXT: s_getpc_b64 s[4:5]
; WAVE64-OPT-NEXT: s_mov_b32 s4, s0
; WAVE64-OPT-NEXT: v_mov_b32_e32 v0, 0
; WAVE64-OPT-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x0
; WAVE64-OPT-NEXT: s_waitcnt lgkmcnt(0)
; WAVE64-OPT-NEXT: s_add_u32 s4, s4, s1
; WAVE64-OPT-NEXT: s_addc_u32 s5, s5, 0
; WAVE64-OPT-NEXT: s_lshr_b32 s0, s32, 6
; WAVE64-OPT-NEXT: v_mov_b32_e32 v1, s0
; WAVE64-OPT-NEXT: buffer_store_dword v0, v1, s[4:7], 0 offen
; WAVE64-OPT-NEXT: s_endpgm
;
; WAVE32-O0-LABEL: kernel_store_stacksave_nocall:
; WAVE32-O0: ; %bb.0:
; WAVE32-O0-NEXT: s_getpc_b64 s[12:13]
; WAVE32-O0-NEXT: s_mov_b32 s12, s0
; WAVE32-O0-NEXT: s_load_dwordx4 s[12:15], s[12:13], 0x0
; WAVE32-O0-NEXT: s_waitcnt lgkmcnt(0)
; WAVE32-O0-NEXT: s_bitset0_b32 s15, 21
; WAVE32-O0-NEXT: s_add_u32 s12, s12, s11
; WAVE32-O0-NEXT: s_addc_u32 s13, s13, 0
; WAVE32-O0-NEXT: s_mov_b32 s0, s32
; WAVE32-O0-NEXT: s_lshr_b32 s0, s0, 5
; WAVE32-O0-NEXT: v_mov_b32_e32 v0, 0
; WAVE32-O0-NEXT: v_mov_b32_e32 v1, s0
; WAVE32-O0-NEXT: buffer_store_dword v0, v1, s[12:15], 0 offen
; WAVE32-O0-NEXT: s_endpgm
;
; WAVE64-O0-LABEL: kernel_store_stacksave_nocall:
; WAVE64-O0: ; %bb.0:
; WAVE64-O0-NEXT: s_getpc_b64 s[12:13]
; WAVE64-O0-NEXT: s_mov_b32 s12, s0
; WAVE64-O0-NEXT: s_load_dwordx4 s[12:15], s[12:13], 0x0
; WAVE64-O0-NEXT: s_waitcnt lgkmcnt(0)
; WAVE64-O0-NEXT: s_add_u32 s12, s12, s11
; WAVE64-O0-NEXT: s_addc_u32 s13, s13, 0
; WAVE64-O0-NEXT: s_mov_b32 s0, s32
; WAVE64-O0-NEXT: s_lshr_b32 s0, s0, 6
; WAVE64-O0-NEXT: v_mov_b32_e32 v0, 0
; WAVE64-O0-NEXT: v_mov_b32_e32 v1, s0
; WAVE64-O0-NEXT: buffer_store_dword v0, v1, s[12:15], 0 offen
; WAVE64-O0-NEXT: s_endpgm
  %stacksave = call ptr addrspace(5) @llvm.stacksave.p5()
  store i32 0, ptr addrspace(5) %stacksave
  ret void
}

define void @func_stacksave_nonentry_block(i1 %cond) {
; WAVE32-OPT-LABEL: func_stacksave_nonentry_block:
; WAVE32-OPT: ; %bb.0: ; %bb0
; WAVE32-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-OPT-NEXT: v_and_b32_e32 v0, 1, v0
; WAVE32-OPT-NEXT: s_mov_b32 s4, exec_lo
; WAVE32-OPT-NEXT: v_cmpx_eq_u32_e32 1, v0
; WAVE32-OPT-NEXT: s_cbranch_execz .LBB4_2
; WAVE32-OPT-NEXT: ; %bb.1: ; %bb1
; WAVE32-OPT-NEXT: s_lshr_b32 s5, s32, 5
; WAVE32-OPT-NEXT: ;;#ASMSTART
; WAVE32-OPT-NEXT: ; use s5
; WAVE32-OPT-NEXT: ;;#ASMEND
; WAVE32-OPT-NEXT: .LBB4_2: ; %bb2
; WAVE32-OPT-NEXT: s_or_b32 exec_lo, exec_lo, s4
; WAVE32-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-OPT-LABEL: func_stacksave_nonentry_block:
; WAVE64-OPT: ; %bb.0: ; %bb0
; WAVE64-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-OPT-NEXT: v_and_b32_e32 v0, 1, v0
; WAVE64-OPT-NEXT: s_mov_b64 s[4:5], exec
; WAVE64-OPT-NEXT: v_cmpx_eq_u32_e32 1, v0
; WAVE64-OPT-NEXT: s_cbranch_execz .LBB4_2
; WAVE64-OPT-NEXT: ; %bb.1: ; %bb1
; WAVE64-OPT-NEXT: s_lshr_b32 s6, s32, 6
; WAVE64-OPT-NEXT: ;;#ASMSTART
; WAVE64-OPT-NEXT: ; use s6
; WAVE64-OPT-NEXT: ;;#ASMEND
; WAVE64-OPT-NEXT: .LBB4_2: ; %bb2
; WAVE64-OPT-NEXT: s_or_b64 exec, exec, s[4:5]
; WAVE64-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE32-O0-LABEL: func_stacksave_nonentry_block:
; WAVE32-O0: ; %bb.0: ; %bb0
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-O0-NEXT: s_xor_saveexec_b32 s4, -1
; WAVE32-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s4
; WAVE32-O0-NEXT: ; implicit-def: $vgpr1 : SGPR spill to VGPR lane
; WAVE32-O0-NEXT: v_mov_b32_e32 v1, v0
; WAVE32-O0-NEXT: s_or_saveexec_b32 s7, -1
; WAVE32-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s7
; WAVE32-O0-NEXT: v_and_b32_e64 v1, 1, v1
; WAVE32-O0-NEXT: v_cmp_eq_u32_e64 s5, v1, 1
; WAVE32-O0-NEXT: s_mov_b32 s4, exec_lo
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE32-O0-NEXT: v_writelane_b32 v0, s4, 0
; WAVE32-O0-NEXT: s_or_saveexec_b32 s7, -1
; WAVE32-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s7
; WAVE32-O0-NEXT: s_and_b32 s4, s4, s5
; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s4
; WAVE32-O0-NEXT: s_cbranch_execz .LBB4_2
; WAVE32-O0-NEXT: ; %bb.1: ; %bb1
; WAVE32-O0-NEXT: s_mov_b32 s4, s32
; WAVE32-O0-NEXT: s_lshr_b32 s4, s4, 5
; WAVE32-O0-NEXT: ;;#ASMSTART
; WAVE32-O0-NEXT: ; use s4
; WAVE32-O0-NEXT: ;;#ASMEND
; WAVE32-O0-NEXT: .LBB4_2: ; %bb2
; WAVE32-O0-NEXT: s_or_saveexec_b32 s7, -1
; WAVE32-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s7
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE32-O0-NEXT: v_readlane_b32 s4, v0, 0
; WAVE32-O0-NEXT: s_or_b32 exec_lo, exec_lo, s4
; WAVE32-O0-NEXT: ; kill: killed $vgpr0
; WAVE32-O0-NEXT: s_xor_saveexec_b32 s4, -1
; WAVE32-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s4
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE32-O0-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-O0-LABEL: func_stacksave_nonentry_block:
; WAVE64-O0: ; %bb.0: ; %bb0
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
; WAVE64-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; WAVE64-O0-NEXT: s_mov_b64 exec, s[4:5]
; WAVE64-O0-NEXT: ; implicit-def: $vgpr1 : SGPR spill to VGPR lane
; WAVE64-O0-NEXT: v_mov_b32_e32 v1, v0
; WAVE64-O0-NEXT: s_or_saveexec_b64 s[10:11], -1
; WAVE64-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
; WAVE64-O0-NEXT: s_mov_b64 exec, s[10:11]
; WAVE64-O0-NEXT: v_and_b32_e64 v1, 1, v1
; WAVE64-O0-NEXT: v_cmp_eq_u32_e64 s[6:7], v1, 1
; WAVE64-O0-NEXT: s_mov_b64 s[4:5], exec
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE64-O0-NEXT: v_writelane_b32 v0, s4, 0
; WAVE64-O0-NEXT: v_writelane_b32 v0, s5, 1
; WAVE64-O0-NEXT: s_or_saveexec_b64 s[10:11], -1
; WAVE64-O0-NEXT: buffer_store_dword v0, off, s[0:3], s32 ; 4-byte Folded Spill
; WAVE64-O0-NEXT: s_mov_b64 exec, s[10:11]
; WAVE64-O0-NEXT: s_and_b64 s[4:5], s[4:5], s[6:7]
; WAVE64-O0-NEXT: s_mov_b64 exec, s[4:5]
; WAVE64-O0-NEXT: s_cbranch_execz .LBB4_2
; WAVE64-O0-NEXT: ; %bb.1: ; %bb1
; WAVE64-O0-NEXT: s_mov_b32 s4, s32
; WAVE64-O0-NEXT: s_lshr_b32 s4, s4, 6
; WAVE64-O0-NEXT: ;;#ASMSTART
; WAVE64-O0-NEXT: ; use s4
; WAVE64-O0-NEXT: ;;#ASMEND
; WAVE64-O0-NEXT: .LBB4_2: ; %bb2
; WAVE64-O0-NEXT: s_or_saveexec_b64 s[10:11], -1
; WAVE64-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 ; 4-byte Folded Reload
; WAVE64-O0-NEXT: s_mov_b64 exec, s[10:11]
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE64-O0-NEXT: v_readlane_b32 s4, v0, 0
; WAVE64-O0-NEXT: v_readlane_b32 s5, v0, 1
; WAVE64-O0-NEXT: s_or_b64 exec, exec, s[4:5]
; WAVE64-O0-NEXT: ; kill: killed $vgpr0
; WAVE64-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
; WAVE64-O0-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
; WAVE64-O0-NEXT: s_mov_b64 exec, s[4:5]
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE64-O0-NEXT: s_setpc_b64 s[30:31]
bb0:
  br i1 %cond, label %bb1, label %bb2

bb1:
  %stacksave = call ptr addrspace(5) @llvm.stacksave.p5()
  call void asm sideeffect "; use $0", "s"(ptr addrspace(5) %stacksave)
  br label %bb2

bb2:
  ret void
}

define void @func_stackrestore_poison() {
; WAVE32-OPT-LABEL: func_stackrestore_poison:
; WAVE32-OPT: ; %bb.0:
; WAVE32-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-OPT-NEXT: s_lshl_b32 s32, s4, 5
; WAVE32-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-OPT-LABEL: func_stackrestore_poison:
; WAVE64-OPT: ; %bb.0:
; WAVE64-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-OPT-NEXT: s_lshl_b32 s32, s4, 6
; WAVE64-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE32-O0-LABEL: func_stackrestore_poison:
; WAVE32-O0: ; %bb.0:
; WAVE32-O0-NEXT: ; implicit-def: $sgpr4
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-O0-NEXT: s_lshl_b32 s4, s4, 5
; WAVE32-O0-NEXT: s_mov_b32 s32, s4
; WAVE32-O0-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-O0-LABEL: func_stackrestore_poison:
; WAVE64-O0: ; %bb.0:
; WAVE64-O0-NEXT: ; implicit-def: $sgpr4
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-O0-NEXT: s_lshl_b32 s4, s4, 6
; WAVE64-O0-NEXT: s_mov_b32 s32, s4
; WAVE64-O0-NEXT: s_setpc_b64 s[30:31]
  call void @llvm.stackrestore.p5(ptr addrspace(5) poison)
  ret void
}

define void @func_stackrestore_null() {
; WAVE32-OPT-LABEL: func_stackrestore_null:
; WAVE32-OPT: ; %bb.0:
; WAVE32-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-OPT-NEXT: s_mov_b32 s32, 0
; WAVE32-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-OPT-LABEL: func_stackrestore_null:
; WAVE64-OPT: ; %bb.0:
; WAVE64-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-OPT-NEXT: s_mov_b32 s32, 0
; WAVE64-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE32-O0-LABEL: func_stackrestore_null:
; WAVE32-O0: ; %bb.0:
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-O0-NEXT: s_mov_b32 s4, 0
; WAVE32-O0-NEXT: s_lshl_b32 s4, s4, 5
; WAVE32-O0-NEXT: s_mov_b32 s32, s4
; WAVE32-O0-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-O0-LABEL: func_stackrestore_null:
; WAVE64-O0: ; %bb.0:
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-O0-NEXT: s_mov_b32 s4, 0
; WAVE64-O0-NEXT: s_lshl_b32 s4, s4, 6
; WAVE64-O0-NEXT: s_mov_b32 s32, s4
; WAVE64-O0-NEXT: s_setpc_b64 s[30:31]
  call void @llvm.stackrestore.p5(ptr addrspace(5) null)
  ret void
}

define void @func_stackrestore_neg1() {
; WAVE32-OPT-LABEL: func_stackrestore_neg1:
; WAVE32-OPT: ; %bb.0:
; WAVE32-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-OPT-NEXT: s_movk_i32 s32, 0xffe0
; WAVE32-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-OPT-LABEL: func_stackrestore_neg1:
; WAVE64-OPT: ; %bb.0:
; WAVE64-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-OPT-NEXT: s_movk_i32 s32, 0xffc0
; WAVE64-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE32-O0-LABEL: func_stackrestore_neg1:
; WAVE32-O0: ; %bb.0:
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-O0-NEXT: s_mov_b32 s4, -1
; WAVE32-O0-NEXT: s_lshl_b32 s4, s4, 5
; WAVE32-O0-NEXT: s_mov_b32 s32, s4
; WAVE32-O0-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-O0-LABEL: func_stackrestore_neg1:
; WAVE64-O0: ; %bb.0:
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-O0-NEXT: s_mov_b32 s4, -1
; WAVE64-O0-NEXT: s_lshl_b32 s4, s4, 6
; WAVE64-O0-NEXT: s_mov_b32 s32, s4
; WAVE64-O0-NEXT: s_setpc_b64 s[30:31]
  call void @llvm.stackrestore.p5(ptr addrspace(5) inttoptr (i32 -1 to ptr addrspace(5)))
  ret void
}

define void @func_stackrestore_42() {
; WAVE32-OPT-LABEL: func_stackrestore_42:
; WAVE32-OPT: ; %bb.0:
; WAVE32-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-OPT-NEXT: s_movk_i32 s32, 0x540
; WAVE32-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-OPT-LABEL: func_stackrestore_42:
; WAVE64-OPT: ; %bb.0:
; WAVE64-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-OPT-NEXT: s_movk_i32 s32, 0xa80
; WAVE64-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE32-O0-LABEL: func_stackrestore_42:
; WAVE32-O0: ; %bb.0:
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-O0-NEXT: s_mov_b32 s4, 42
; WAVE32-O0-NEXT: s_lshl_b32 s4, s4, 5
; WAVE32-O0-NEXT: s_mov_b32 s32, s4
; WAVE32-O0-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-O0-LABEL: func_stackrestore_42:
; WAVE64-O0: ; %bb.0:
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-O0-NEXT: s_mov_b32 s4, 42
; WAVE64-O0-NEXT: s_lshl_b32 s4, s4, 6
; WAVE64-O0-NEXT: s_mov_b32 s32, s4
; WAVE64-O0-NEXT: s_setpc_b64 s[30:31]
  call void @llvm.stackrestore.p5(ptr addrspace(5) inttoptr (i32 42 to ptr addrspace(5)))
  ret void
}

define void @func_stacksave_stackrestore() {
; WAVE32-OPT-LABEL: func_stacksave_stackrestore:
; WAVE32-OPT: ; %bb.0:
; WAVE32-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-OPT-LABEL: func_stacksave_stackrestore:
; WAVE64-OPT: ; %bb.0:
; WAVE64-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE32-O0-LABEL: func_stacksave_stackrestore:
; WAVE32-O0: ; %bb.0:
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-O0-NEXT: s_mov_b32 s4, s32
; WAVE32-O0-NEXT: s_mov_b32 s32, s4
; WAVE32-O0-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-O0-LABEL: func_stacksave_stackrestore:
; WAVE64-O0: ; %bb.0:
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-O0-NEXT: s_mov_b32 s4, s32
; WAVE64-O0-NEXT: s_mov_b32 s32, s4
; WAVE64-O0-NEXT: s_setpc_b64 s[30:31]
  %stacksave = call ptr addrspace(5) @llvm.stacksave.p5()
  call void @llvm.stackrestore.p5(ptr addrspace(5) %stacksave)
  ret void
}

define void @func_stacksave_stackrestore_use() {
; WAVE32-OPT-LABEL: func_stacksave_stackrestore_use:
; WAVE32-OPT: ; %bb.0:
; WAVE32-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-OPT-NEXT: s_lshr_b32 s4, s32, 5
; WAVE32-OPT-NEXT: ;;#ASMSTART
; WAVE32-OPT-NEXT: ; use s4
; WAVE32-OPT-NEXT: ;;#ASMEND
; WAVE32-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-OPT-LABEL: func_stacksave_stackrestore_use:
; WAVE64-OPT: ; %bb.0:
; WAVE64-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-OPT-NEXT: s_lshr_b32 s4, s32, 6
; WAVE64-OPT-NEXT: ;;#ASMSTART
; WAVE64-OPT-NEXT: ; use s4
; WAVE64-OPT-NEXT: ;;#ASMEND
; WAVE64-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE32-O0-LABEL: func_stacksave_stackrestore_use:
; WAVE32-O0: ; %bb.0:
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-O0-NEXT: s_mov_b32 s4, s32
; WAVE32-O0-NEXT: s_lshr_b32 s5, s4, 5
; WAVE32-O0-NEXT: ;;#ASMSTART
; WAVE32-O0-NEXT: ; use s5
; WAVE32-O0-NEXT: ;;#ASMEND
; WAVE32-O0-NEXT: s_mov_b32 s32, s4
; WAVE32-O0-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-O0-LABEL: func_stacksave_stackrestore_use:
; WAVE64-O0: ; %bb.0:
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-O0-NEXT: s_mov_b32 s4, s32
; WAVE64-O0-NEXT: s_lshr_b32 s5, s4, 6
; WAVE64-O0-NEXT: ;;#ASMSTART
; WAVE64-O0-NEXT: ; use s5
; WAVE64-O0-NEXT: ;;#ASMEND
; WAVE64-O0-NEXT: s_mov_b32 s32, s4
; WAVE64-O0-NEXT: s_setpc_b64 s[30:31]
  %stacksave = call ptr addrspace(5) @llvm.stacksave.p5()
  call void asm sideeffect "; use $0", "s"(ptr addrspace(5) %stacksave)
  call void @llvm.stackrestore.p5(ptr addrspace(5) %stacksave)
  ret void
}

define amdgpu_kernel void @kernel_stacksave_stackrestore_use() {
; WAVE32-OPT-LABEL: kernel_stacksave_stackrestore_use:
; WAVE32-OPT: ; %bb.0:
; WAVE32-OPT-NEXT: s_lshr_b32 s0, s32, 5
; WAVE32-OPT-NEXT: ;;#ASMSTART
; WAVE32-OPT-NEXT: ; use s0
; WAVE32-OPT-NEXT: ;;#ASMEND
; WAVE32-OPT-NEXT: s_endpgm
;
; WAVE64-OPT-LABEL: kernel_stacksave_stackrestore_use:
; WAVE64-OPT: ; %bb.0:
; WAVE64-OPT-NEXT: s_lshr_b32 s0, s32, 6
; WAVE64-OPT-NEXT: ;;#ASMSTART
; WAVE64-OPT-NEXT: ; use s0
; WAVE64-OPT-NEXT: ;;#ASMEND
; WAVE64-OPT-NEXT: s_endpgm
;
; WAVE32-O0-LABEL: kernel_stacksave_stackrestore_use:
; WAVE32-O0: ; %bb.0:
; WAVE32-O0-NEXT: s_mov_b32 s0, s32
; WAVE32-O0-NEXT: s_lshr_b32 s1, s0, 5
; WAVE32-O0-NEXT: ;;#ASMSTART
; WAVE32-O0-NEXT: ; use s1
; WAVE32-O0-NEXT: ;;#ASMEND
; WAVE32-O0-NEXT: s_mov_b32 s32, s0
; WAVE32-O0-NEXT: s_endpgm
;
; WAVE64-O0-LABEL: kernel_stacksave_stackrestore_use:
; WAVE64-O0: ; %bb.0:
; WAVE64-O0-NEXT: s_mov_b32 s0, s32
; WAVE64-O0-NEXT: s_lshr_b32 s1, s0, 6
; WAVE64-O0-NEXT: ;;#ASMSTART
; WAVE64-O0-NEXT: ; use s1
; WAVE64-O0-NEXT: ;;#ASMEND
; WAVE64-O0-NEXT: s_mov_b32 s32, s0
; WAVE64-O0-NEXT: s_endpgm
  %stacksave = call ptr addrspace(5) @llvm.stacksave.p5()
  call void asm sideeffect "; use $0", "s"(ptr addrspace(5) %stacksave)
  call void @llvm.stackrestore.p5(ptr addrspace(5) %stacksave)
  ret void
}

define void @func_stacksave_stackrestore_voffset(i32 %offset) {
; WAVE32-OPT-LABEL: func_stacksave_stackrestore_voffset:
; WAVE32-OPT: ; %bb.0:
; WAVE32-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-OPT-NEXT: s_lshr_b32 s4, s32, 5
; WAVE32-OPT-NEXT: v_add_nc_u32_e32 v0, s4, v0
; WAVE32-OPT-NEXT: v_readfirstlane_b32 s4, v0
; WAVE32-OPT-NEXT: s_lshl_b32 s32, s4, 5
; WAVE32-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-OPT-LABEL: func_stacksave_stackrestore_voffset:
; WAVE64-OPT: ; %bb.0:
; WAVE64-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-OPT-NEXT: s_lshr_b32 s4, s32, 6
; WAVE64-OPT-NEXT: v_add_nc_u32_e32 v0, s4, v0
; WAVE64-OPT-NEXT: v_readfirstlane_b32 s4, v0
; WAVE64-OPT-NEXT: s_lshl_b32 s32, s4, 6
; WAVE64-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE32-O0-LABEL: func_stacksave_stackrestore_voffset:
; WAVE32-O0: ; %bb.0:
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-O0-NEXT: s_mov_b32 s4, s32
; WAVE32-O0-NEXT: s_lshr_b32 s4, s4, 5
; WAVE32-O0-NEXT: v_add_nc_u32_e64 v0, s4, v0
; WAVE32-O0-NEXT: v_readfirstlane_b32 s4, v0
; WAVE32-O0-NEXT: s_lshl_b32 s4, s4, 5
; WAVE32-O0-NEXT: s_mov_b32 s32, s4
; WAVE32-O0-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-O0-LABEL: func_stacksave_stackrestore_voffset:
; WAVE64-O0: ; %bb.0:
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-O0-NEXT: s_mov_b32 s4, s32
; WAVE64-O0-NEXT: s_lshr_b32 s4, s4, 6
; WAVE64-O0-NEXT: v_add_nc_u32_e64 v0, s4, v0
; WAVE64-O0-NEXT: v_readfirstlane_b32 s4, v0
; WAVE64-O0-NEXT: s_lshl_b32 s4, s4, 6
; WAVE64-O0-NEXT: s_mov_b32 s32, s4
; WAVE64-O0-NEXT: s_setpc_b64 s[30:31]
  %stacksave = call ptr addrspace(5) @llvm.stacksave.p5()
  %gep = getelementptr i8, ptr addrspace(5) %stacksave, i32 %offset
  call void @llvm.stackrestore.p5(ptr addrspace(5) %gep)
  ret void
}

define void @func_stacksave_vgpr(ptr addrspace(5) %stack) {
; WAVE32-OPT-LABEL: func_stacksave_vgpr:
; WAVE32-OPT: ; %bb.0:
; WAVE32-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-OPT-NEXT: v_readfirstlane_b32 s4, v0
; WAVE32-OPT-NEXT: s_lshl_b32 s32, s4, 5
; WAVE32-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-OPT-LABEL: func_stacksave_vgpr:
; WAVE64-OPT: ; %bb.0:
; WAVE64-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-OPT-NEXT: v_readfirstlane_b32 s4, v0
; WAVE64-OPT-NEXT: s_lshl_b32 s32, s4, 6
; WAVE64-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE32-O0-LABEL: func_stacksave_vgpr:
; WAVE32-O0: ; %bb.0:
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-O0-NEXT: v_readfirstlane_b32 s4, v0
; WAVE32-O0-NEXT: s_lshl_b32 s4, s4, 5
; WAVE32-O0-NEXT: s_mov_b32 s32, s4
; WAVE32-O0-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-O0-LABEL: func_stacksave_vgpr:
; WAVE64-O0: ; %bb.0:
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-O0-NEXT: v_readfirstlane_b32 s4, v0
; WAVE64-O0-NEXT: s_lshl_b32 s4, s4, 6
; WAVE64-O0-NEXT: s_mov_b32 s32, s4
; WAVE64-O0-NEXT: s_setpc_b64 s[30:31]
  call void @llvm.stackrestore.p5(ptr addrspace(5) %stack)
  ret void
}

define amdgpu_gfx void @func_stacksave_sgpr(ptr addrspace(5) inreg %stack) {
; WAVE32-OPT-LABEL: func_stacksave_sgpr:
; WAVE32-OPT: ; %bb.0:
; WAVE32-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-OPT-NEXT: s_lshl_b32 s32, s4, 5
; WAVE32-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-OPT-LABEL: func_stacksave_sgpr:
; WAVE64-OPT: ; %bb.0:
; WAVE64-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-OPT-NEXT: s_lshl_b32 s32, s4, 6
; WAVE64-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE32-O0-LABEL: func_stacksave_sgpr:
; WAVE32-O0: ; %bb.0:
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-O0-NEXT: s_lshl_b32 s34, s4, 5
; WAVE32-O0-NEXT: s_mov_b32 s32, s34
; WAVE32-O0-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-O0-LABEL: func_stacksave_sgpr:
; WAVE64-O0: ; %bb.0:
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-O0-NEXT: s_lshl_b32 s34, s4, 6
; WAVE64-O0-NEXT: s_mov_b32 s32, s34
; WAVE64-O0-NEXT: s_setpc_b64 s[30:31]
  call void @llvm.stackrestore.p5(ptr addrspace(5) %stack)
  ret void
}

define amdgpu_kernel void @kernel_stacksave_sgpr(ptr addrspace(5) %stack) {
; WAVE32-OPT-LABEL: kernel_stacksave_sgpr:
; WAVE32-OPT: ; %bb.0:
; WAVE32-OPT-NEXT: s_load_dword s0, s[0:1], 0x0
; WAVE32-OPT-NEXT: s_waitcnt lgkmcnt(0)
; WAVE32-OPT-NEXT: ;;#ASMSTART
; WAVE32-OPT-NEXT: ; use s0
; WAVE32-OPT-NEXT: ;;#ASMEND
; WAVE32-OPT-NEXT: s_endpgm
;
; WAVE64-OPT-LABEL: kernel_stacksave_sgpr:
; WAVE64-OPT: ; %bb.0:
; WAVE64-OPT-NEXT: s_load_dword s0, s[0:1], 0x0
; WAVE64-OPT-NEXT: s_waitcnt lgkmcnt(0)
; WAVE64-OPT-NEXT: ;;#ASMSTART
; WAVE64-OPT-NEXT: ; use s0
; WAVE64-OPT-NEXT: ;;#ASMEND
; WAVE64-OPT-NEXT: s_endpgm
;
; WAVE32-O0-LABEL: kernel_stacksave_sgpr:
; WAVE32-O0: ; %bb.0:
; WAVE32-O0-NEXT: s_load_dword s0, s[4:5], 0x0
; WAVE32-O0-NEXT: s_waitcnt lgkmcnt(0)
; WAVE32-O0-NEXT: s_mov_b32 s1, s0
; WAVE32-O0-NEXT: ;;#ASMSTART
; WAVE32-O0-NEXT: ; use s1
; WAVE32-O0-NEXT: ;;#ASMEND
; WAVE32-O0-NEXT: s_lshl_b32 s0, s0, 5
; WAVE32-O0-NEXT: s_mov_b32 s32, s0
; WAVE32-O0-NEXT: s_endpgm
;
; WAVE64-O0-LABEL: kernel_stacksave_sgpr:
; WAVE64-O0: ; %bb.0:
; WAVE64-O0-NEXT: s_load_dword s0, s[4:5], 0x0
; WAVE64-O0-NEXT: s_waitcnt lgkmcnt(0)
; WAVE64-O0-NEXT: s_mov_b32 s1, s0
; WAVE64-O0-NEXT: ;;#ASMSTART
; WAVE64-O0-NEXT: ; use s1
; WAVE64-O0-NEXT: ;;#ASMEND
; WAVE64-O0-NEXT: s_lshl_b32 s0, s0, 6
; WAVE64-O0-NEXT: s_mov_b32 s32, s0
; WAVE64-O0-NEXT: s_endpgm
  call void asm sideeffect "; use $0", "s"(ptr addrspace(5) %stack)
  call void @llvm.stackrestore.p5(ptr addrspace(5) %stack)
  ret void
}

define amdgpu_kernel void @kernel_stacksave_stackrestore_call_with_stack_objects() {
; WAVE32-OPT-LABEL: kernel_stacksave_stackrestore_call_with_stack_objects:
; WAVE32-OPT: ; %bb.0:
; WAVE32-OPT-NEXT: s_getpc_b64 s[8:9]
; WAVE32-OPT-NEXT: s_mov_b32 s8, s0
; WAVE32-OPT-NEXT: s_movk_i32 s32, 0x1200
; WAVE32-OPT-NEXT: s_load_dwordx4 s[8:11], s[8:9], 0x0
; WAVE32-OPT-NEXT: s_mov_b32 s0, s32
; WAVE32-OPT-NEXT: v_mov_b32_e32 v0, 42
; WAVE32-OPT-NEXT: v_mov_b32_e32 v1, 17
; WAVE32-OPT-NEXT: s_mov_b32 s5, stack_passed_argument@abs32@hi
; WAVE32-OPT-NEXT: s_mov_b32 s4, stack_passed_argument@abs32@lo
; WAVE32-OPT-NEXT: s_waitcnt lgkmcnt(0)
; WAVE32-OPT-NEXT: s_bitset0_b32 s11, 21
; WAVE32-OPT-NEXT: s_add_u32 s8, s8, s1
; WAVE32-OPT-NEXT: s_addc_u32 s9, s9, 0
; WAVE32-OPT-NEXT: s_lshr_b32 s6, s0, 5
; WAVE32-OPT-NEXT: s_mov_b64 s[0:1], s[8:9]
; WAVE32-OPT-NEXT: s_mov_b64 s[2:3], s[10:11]
; WAVE32-OPT-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:4
; WAVE32-OPT-NEXT: s_waitcnt_vscnt null, 0x0
; WAVE32-OPT-NEXT: buffer_store_dword v1, off, s[8:11], s32 offset:4
; WAVE32-OPT-NEXT: s_swappc_b64 s[30:31], s[4:5]
; WAVE32-OPT-NEXT: ;;#ASMSTART
; WAVE32-OPT-NEXT: ; use s6
; WAVE32-OPT-NEXT: ;;#ASMEND
; WAVE32-OPT-NEXT: s_endpgm
;
; WAVE64-OPT-LABEL: kernel_stacksave_stackrestore_call_with_stack_objects:
; WAVE64-OPT: ; %bb.0:
; WAVE64-OPT-NEXT: s_getpc_b64 s[8:9]
; WAVE64-OPT-NEXT: s_mov_b32 s8, s0
; WAVE64-OPT-NEXT: s_movk_i32 s32, 0x2400
; WAVE64-OPT-NEXT: s_load_dwordx4 s[8:11], s[8:9], 0x0
; WAVE64-OPT-NEXT: s_mov_b32 s0, s32
; WAVE64-OPT-NEXT: v_mov_b32_e32 v0, 42
; WAVE64-OPT-NEXT: v_mov_b32_e32 v1, 17
; WAVE64-OPT-NEXT: s_mov_b32 s5, stack_passed_argument@abs32@hi
; WAVE64-OPT-NEXT: s_mov_b32 s4, stack_passed_argument@abs32@lo
; WAVE64-OPT-NEXT: s_waitcnt lgkmcnt(0)
; WAVE64-OPT-NEXT: s_add_u32 s8, s8, s1
; WAVE64-OPT-NEXT: s_addc_u32 s9, s9, 0
; WAVE64-OPT-NEXT: s_lshr_b32 s6, s0, 6
; WAVE64-OPT-NEXT: s_mov_b64 s[0:1], s[8:9]
; WAVE64-OPT-NEXT: s_mov_b64 s[2:3], s[10:11]
; WAVE64-OPT-NEXT: buffer_store_dword v0, off, s[8:11], 0 offset:4
; WAVE64-OPT-NEXT: s_waitcnt_vscnt null, 0x0
; WAVE64-OPT-NEXT: buffer_store_dword v1, off, s[8:11], s32 offset:4
; WAVE64-OPT-NEXT: s_swappc_b64 s[30:31], s[4:5]
; WAVE64-OPT-NEXT: ;;#ASMSTART
; WAVE64-OPT-NEXT: ; use s6
; WAVE64-OPT-NEXT: ;;#ASMEND
; WAVE64-OPT-NEXT: s_endpgm
;
; WAVE32-O0-LABEL: kernel_stacksave_stackrestore_call_with_stack_objects:
; WAVE32-O0: ; %bb.0:
; WAVE32-O0-NEXT: s_mov_b32 s32, 0x1200
; WAVE32-O0-NEXT: s_getpc_b64 s[20:21]
; WAVE32-O0-NEXT: s_mov_b32 s20, s0
; WAVE32-O0-NEXT: s_load_dwordx4 s[20:23], s[20:21], 0x0
; WAVE32-O0-NEXT: s_waitcnt lgkmcnt(0)
; WAVE32-O0-NEXT: s_bitset0_b32 s23, 21
; WAVE32-O0-NEXT: s_add_u32 s20, s20, s11
; WAVE32-O0-NEXT: s_addc_u32 s21, s21, 0
; WAVE32-O0-NEXT: ; implicit-def: $vgpr3 : SGPR spill to VGPR lane
; WAVE32-O0-NEXT: s_mov_b32 s14, s10
; WAVE32-O0-NEXT: s_mov_b32 s13, s9
; WAVE32-O0-NEXT: s_mov_b32 s12, s8
; WAVE32-O0-NEXT: s_mov_b64 s[10:11], s[6:7]
; WAVE32-O0-NEXT: s_mov_b64 s[8:9], s[4:5]
; WAVE32-O0-NEXT: s_mov_b64 s[6:7], s[2:3]
; WAVE32-O0-NEXT: s_mov_b64 s[4:5], s[0:1]
; WAVE32-O0-NEXT: s_mov_b32 s0, s32
; WAVE32-O0-NEXT: v_writelane_b32 v3, s0, 0
; WAVE32-O0-NEXT: s_lshr_b32 s0, s0, 5
; WAVE32-O0-NEXT: v_writelane_b32 v3, s0, 1
; WAVE32-O0-NEXT: s_or_saveexec_b32 s19, -1
; WAVE32-O0-NEXT: buffer_store_dword v3, off, s[20:23], 0 offset:132 ; 4-byte Folded Spill
; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s19
; WAVE32-O0-NEXT: v_mov_b32_e32 v3, 42
; WAVE32-O0-NEXT: buffer_store_dword v3, off, s[20:23], 0 offset:4
; WAVE32-O0-NEXT: s_waitcnt_vscnt null, 0x0
; WAVE32-O0-NEXT: s_mov_b64 s[0:1], s[20:21]
; WAVE32-O0-NEXT: s_mov_b64 s[2:3], s[22:23]
; WAVE32-O0-NEXT: s_mov_b32 s15, s32
; WAVE32-O0-NEXT: v_mov_b32_e32 v3, 17
; WAVE32-O0-NEXT: buffer_store_dword v3, off, s[20:23], s15 offset:4
; WAVE32-O0-NEXT: s_mov_b32 s15, stack_passed_argument@abs32@hi
; WAVE32-O0-NEXT: s_mov_b32 s16, stack_passed_argument@abs32@lo
; WAVE32-O0-NEXT: ; kill: def $sgpr16 killed $sgpr16 def $sgpr16_sgpr17
; WAVE32-O0-NEXT: s_mov_b32 s17, s15
; WAVE32-O0-NEXT: s_mov_b32 s15, 20
; WAVE32-O0-NEXT: v_lshlrev_b32_e64 v2, s15, v2
; WAVE32-O0-NEXT: s_mov_b32 s15, 10
; WAVE32-O0-NEXT: v_lshlrev_b32_e64 v1, s15, v1
; WAVE32-O0-NEXT: v_or3_b32 v31, v0, v1, v2
; WAVE32-O0-NEXT: ; implicit-def: $sgpr15
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v0, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v1, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v2, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v3, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v4, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v5, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v6, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v7, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v8, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v9, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v10, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v11, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v12, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v13, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v14, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v15, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v16, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v17, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v18, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v19, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v20, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v21, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v22, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v23, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v24, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v25, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v26, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v27, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v28, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v29, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v30, s18
; WAVE32-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; WAVE32-O0-NEXT: s_or_saveexec_b32 s19, -1
; WAVE32-O0-NEXT: buffer_load_dword v0, off, s[20:23], 0 offset:132 ; 4-byte Folded Reload
; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s19
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE32-O0-NEXT: v_readlane_b32 s1, v0, 1
; WAVE32-O0-NEXT: v_readlane_b32 s0, v0, 0
; WAVE32-O0-NEXT: ;;#ASMSTART
; WAVE32-O0-NEXT: ; use s1
; WAVE32-O0-NEXT: ;;#ASMEND
; WAVE32-O0-NEXT: s_mov_b32 s32, s0
; WAVE32-O0-NEXT: ; kill: killed $vgpr0
; WAVE32-O0-NEXT: s_endpgm
;
; WAVE64-O0-LABEL: kernel_stacksave_stackrestore_call_with_stack_objects:
; WAVE64-O0: ; %bb.0:
; WAVE64-O0-NEXT: s_mov_b32 s32, 0x2400
; WAVE64-O0-NEXT: s_getpc_b64 s[24:25]
; WAVE64-O0-NEXT: s_mov_b32 s24, s0
; WAVE64-O0-NEXT: s_load_dwordx4 s[24:27], s[24:25], 0x0
; WAVE64-O0-NEXT: s_waitcnt lgkmcnt(0)
; WAVE64-O0-NEXT: s_add_u32 s24, s24, s11
; WAVE64-O0-NEXT: s_addc_u32 s25, s25, 0
; WAVE64-O0-NEXT: ; implicit-def: $vgpr3 : SGPR spill to VGPR lane
; WAVE64-O0-NEXT: s_mov_b32 s14, s10
; WAVE64-O0-NEXT: s_mov_b32 s13, s9
; WAVE64-O0-NEXT: s_mov_b32 s12, s8
; WAVE64-O0-NEXT: s_mov_b64 s[10:11], s[6:7]
; WAVE64-O0-NEXT: s_mov_b64 s[8:9], s[4:5]
; WAVE64-O0-NEXT: s_mov_b64 s[6:7], s[2:3]
; WAVE64-O0-NEXT: s_mov_b64 s[4:5], s[0:1]
; WAVE64-O0-NEXT: s_mov_b32 s0, s32
; WAVE64-O0-NEXT: v_writelane_b32 v3, s0, 0
; WAVE64-O0-NEXT: s_lshr_b32 s0, s0, 6
; WAVE64-O0-NEXT: v_writelane_b32 v3, s0, 1
; WAVE64-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; WAVE64-O0-NEXT: buffer_store_dword v3, off, s[24:27], 0 offset:132 ; 4-byte Folded Spill
; WAVE64-O0-NEXT: s_mov_b64 exec, s[20:21]
; WAVE64-O0-NEXT: v_mov_b32_e32 v3, 42
; WAVE64-O0-NEXT: buffer_store_dword v3, off, s[24:27], 0 offset:4
; WAVE64-O0-NEXT: s_waitcnt_vscnt null, 0x0
; WAVE64-O0-NEXT: s_mov_b64 s[0:1], s[24:25]
; WAVE64-O0-NEXT: s_mov_b64 s[2:3], s[26:27]
; WAVE64-O0-NEXT: s_mov_b32 s15, s32
; WAVE64-O0-NEXT: v_mov_b32_e32 v3, 17
; WAVE64-O0-NEXT: buffer_store_dword v3, off, s[24:27], s15 offset:4
; WAVE64-O0-NEXT: s_mov_b32 s15, stack_passed_argument@abs32@hi
; WAVE64-O0-NEXT: s_mov_b32 s16, stack_passed_argument@abs32@lo
; WAVE64-O0-NEXT: ; kill: def $sgpr16 killed $sgpr16 def $sgpr16_sgpr17
; WAVE64-O0-NEXT: s_mov_b32 s17, s15
; WAVE64-O0-NEXT: s_mov_b32 s15, 20
; WAVE64-O0-NEXT: v_lshlrev_b32_e64 v2, s15, v2
; WAVE64-O0-NEXT: s_mov_b32 s15, 10
; WAVE64-O0-NEXT: v_lshlrev_b32_e64 v1, s15, v1
; WAVE64-O0-NEXT: v_or3_b32 v31, v0, v1, v2
; WAVE64-O0-NEXT: ; implicit-def: $sgpr15
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v0, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v1, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v2, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v3, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v4, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v5, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v6, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v7, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v8, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v9, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v10, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v11, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v12, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v13, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v14, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v15, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v16, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v17, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v18, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v19, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v20, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v21, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v22, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v23, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v24, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v25, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v26, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v27, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v28, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v29, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v30, s18
; WAVE64-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; WAVE64-O0-NEXT: s_or_saveexec_b64 s[20:21], -1
; WAVE64-O0-NEXT: buffer_load_dword v0, off, s[24:27], 0 offset:132 ; 4-byte Folded Reload
; WAVE64-O0-NEXT: s_mov_b64 exec, s[20:21]
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE64-O0-NEXT: v_readlane_b32 s1, v0, 1
; WAVE64-O0-NEXT: v_readlane_b32 s0, v0, 0
; WAVE64-O0-NEXT: ;;#ASMSTART
; WAVE64-O0-NEXT: ; use s1
; WAVE64-O0-NEXT: ;;#ASMEND
; WAVE64-O0-NEXT: s_mov_b32 s32, s0
; WAVE64-O0-NEXT: ; kill: killed $vgpr0
; WAVE64-O0-NEXT: s_endpgm
  %alloca = alloca [32 x i32], addrspace(5)
  %stacksave = call ptr addrspace(5) @llvm.stacksave.p5()
  store volatile i32 42, ptr addrspace(5) %alloca
  call void @stack_passed_argument([32 x i32] poison, i32 17)
  call void asm sideeffect "; use $0", "s"(ptr addrspace(5) %stacksave)
  call void @llvm.stackrestore.p5(ptr addrspace(5) %stacksave)
  ret void
}

define void @func_stacksave_stackrestore_call_with_stack_objects() {
; WAVE32-OPT-LABEL: func_stacksave_stackrestore_call_with_stack_objects:
; WAVE32-OPT: ; %bb.0:
; WAVE32-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-OPT-NEXT: s_mov_b32 s8, s33
; WAVE32-OPT-NEXT: s_mov_b32 s33, s32
; WAVE32-OPT-NEXT: s_xor_saveexec_b32 s4, -1
; WAVE32-OPT-NEXT: buffer_store_dword v31, off, s[0:3], s33 offset:128 ; 4-byte Folded Spill
; WAVE32-OPT-NEXT: s_mov_b32 exec_lo, s4
; WAVE32-OPT-NEXT: v_writelane_b32 v31, s30, 0
; WAVE32-OPT-NEXT: v_mov_b32_e32 v0, 42
; WAVE32-OPT-NEXT: v_mov_b32_e32 v1, 17
; WAVE32-OPT-NEXT: s_addk_i32 s32, 0x1200
; WAVE32-OPT-NEXT: s_mov_b32 s5, stack_passed_argument@abs32@hi
; WAVE32-OPT-NEXT: s_mov_b32 s6, s32
; WAVE32-OPT-NEXT: s_mov_b32 s4, stack_passed_argument@abs32@lo
; WAVE32-OPT-NEXT: v_writelane_b32 v31, s31, 1
; WAVE32-OPT-NEXT: s_lshr_b32 s7, s6, 5
; WAVE32-OPT-NEXT: buffer_store_dword v0, off, s[0:3], s33
; WAVE32-OPT-NEXT: s_waitcnt_vscnt null, 0x0
; WAVE32-OPT-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4
; WAVE32-OPT-NEXT: s_swappc_b64 s[30:31], s[4:5]
; WAVE32-OPT-NEXT: ;;#ASMSTART
; WAVE32-OPT-NEXT: ; use s7
; WAVE32-OPT-NEXT: ;;#ASMEND
; WAVE32-OPT-NEXT: s_mov_b32 s32, s6
; WAVE32-OPT-NEXT: v_readlane_b32 s31, v31, 1
; WAVE32-OPT-NEXT: v_readlane_b32 s30, v31, 0
; WAVE32-OPT-NEXT: s_xor_saveexec_b32 s4, -1
; WAVE32-OPT-NEXT: buffer_load_dword v31, off, s[0:3], s33 offset:128 ; 4-byte Folded Reload
; WAVE32-OPT-NEXT: s_mov_b32 exec_lo, s4
; WAVE32-OPT-NEXT: s_addk_i32 s32, 0xee00
; WAVE32-OPT-NEXT: s_mov_b32 s33, s8
; WAVE32-OPT-NEXT: s_waitcnt vmcnt(0)
; WAVE32-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-OPT-LABEL: func_stacksave_stackrestore_call_with_stack_objects:
; WAVE64-OPT: ; %bb.0:
; WAVE64-OPT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-OPT-NEXT: s_mov_b32 s8, s33
; WAVE64-OPT-NEXT: s_mov_b32 s33, s32
; WAVE64-OPT-NEXT: s_xor_saveexec_b64 s[4:5], -1
; WAVE64-OPT-NEXT: buffer_store_dword v31, off, s[0:3], s33 offset:128 ; 4-byte Folded Spill
; WAVE64-OPT-NEXT: s_mov_b64 exec, s[4:5]
; WAVE64-OPT-NEXT: v_writelane_b32 v31, s30, 0
; WAVE64-OPT-NEXT: v_mov_b32_e32 v0, 42
; WAVE64-OPT-NEXT: v_mov_b32_e32 v1, 17
; WAVE64-OPT-NEXT: s_addk_i32 s32, 0x2400
; WAVE64-OPT-NEXT: s_mov_b32 s5, stack_passed_argument@abs32@hi
; WAVE64-OPT-NEXT: s_mov_b32 s6, s32
; WAVE64-OPT-NEXT: s_mov_b32 s4, stack_passed_argument@abs32@lo
; WAVE64-OPT-NEXT: v_writelane_b32 v31, s31, 1
; WAVE64-OPT-NEXT: s_lshr_b32 s7, s6, 6
; WAVE64-OPT-NEXT: buffer_store_dword v0, off, s[0:3], s33
; WAVE64-OPT-NEXT: s_waitcnt_vscnt null, 0x0
; WAVE64-OPT-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:4
; WAVE64-OPT-NEXT: s_swappc_b64 s[30:31], s[4:5]
; WAVE64-OPT-NEXT: ;;#ASMSTART
; WAVE64-OPT-NEXT: ; use s7
; WAVE64-OPT-NEXT: ;;#ASMEND
; WAVE64-OPT-NEXT: s_mov_b32 s32, s6
; WAVE64-OPT-NEXT: v_readlane_b32 s31, v31, 1
; WAVE64-OPT-NEXT: v_readlane_b32 s30, v31, 0
; WAVE64-OPT-NEXT: s_xor_saveexec_b64 s[4:5], -1
; WAVE64-OPT-NEXT: buffer_load_dword v31, off, s[0:3], s33 offset:128 ; 4-byte Folded Reload
; WAVE64-OPT-NEXT: s_mov_b64 exec, s[4:5]
; WAVE64-OPT-NEXT: s_addk_i32 s32, 0xdc00
; WAVE64-OPT-NEXT: s_mov_b32 s33, s8
; WAVE64-OPT-NEXT: s_waitcnt vmcnt(0)
; WAVE64-OPT-NEXT: s_setpc_b64 s[30:31]
;
; WAVE32-O0-LABEL: func_stacksave_stackrestore_call_with_stack_objects:
; WAVE32-O0: ; %bb.0:
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE32-O0-NEXT: s_mov_b32 s25, s33
; WAVE32-O0-NEXT: s_mov_b32 s33, s32
; WAVE32-O0-NEXT: s_xor_saveexec_b32 s16, -1
; WAVE32-O0-NEXT: buffer_store_dword v32, off, s[0:3], s33 offset:128 ; 4-byte Folded Spill
; WAVE32-O0-NEXT: buffer_store_dword v0, off, s[0:3], s33 offset:136 ; 4-byte Folded Spill
; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s16
; WAVE32-O0-NEXT: s_add_i32 s32, s32, 0x1200
; WAVE32-O0-NEXT: ; implicit-def: $vgpr0 : SGPR spill to VGPR lane
; WAVE32-O0-NEXT: v_writelane_b32 v32, s30, 0
; WAVE32-O0-NEXT: v_writelane_b32 v32, s31, 1
; WAVE32-O0-NEXT: s_mov_b32 s16, s32
; WAVE32-O0-NEXT: v_writelane_b32 v0, s16, 0
; WAVE32-O0-NEXT: s_lshr_b32 s16, s16, 5
; WAVE32-O0-NEXT: v_writelane_b32 v0, s16, 1
; WAVE32-O0-NEXT: s_or_saveexec_b32 s24, -1
; WAVE32-O0-NEXT: buffer_store_dword v0, off, s[0:3], s33 offset:132 ; 4-byte Folded Spill
; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s24
; WAVE32-O0-NEXT: v_mov_b32_e32 v0, 42
; WAVE32-O0-NEXT: buffer_store_dword v0, off, s[0:3], s33
; WAVE32-O0-NEXT: s_waitcnt_vscnt null, 0x0
; WAVE32-O0-NEXT: s_mov_b64 s[22:23], s[2:3]
; WAVE32-O0-NEXT: s_mov_b64 s[20:21], s[0:1]
; WAVE32-O0-NEXT: s_mov_b32 s16, s32
; WAVE32-O0-NEXT: v_mov_b32_e32 v0, 17
; WAVE32-O0-NEXT: buffer_store_dword v0, off, s[0:3], s16 offset:4
; WAVE32-O0-NEXT: s_mov_b32 s18, stack_passed_argument@abs32@hi
; WAVE32-O0-NEXT: s_mov_b32 s16, stack_passed_argument@abs32@lo
; WAVE32-O0-NEXT: ; kill: def $sgpr16 killed $sgpr16 def $sgpr16_sgpr17
; WAVE32-O0-NEXT: s_mov_b32 s17, s18
; WAVE32-O0-NEXT: s_mov_b64 s[0:1], s[20:21]
; WAVE32-O0-NEXT: s_mov_b64 s[2:3], s[22:23]
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v0, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v1, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v2, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v3, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v4, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v5, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v6, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v7, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v8, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v9, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v10, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v11, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v12, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v13, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v14, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v15, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v16, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v17, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v18, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v19, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v20, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v21, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v22, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v23, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v24, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v25, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v26, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v27, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v28, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v29, s18
; WAVE32-O0-NEXT: ; implicit-def: $sgpr18
; WAVE32-O0-NEXT: v_mov_b32_e32 v30, s18
; WAVE32-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; WAVE32-O0-NEXT: s_or_saveexec_b32 s24, -1
; WAVE32-O0-NEXT: buffer_load_dword v0, off, s[0:3], s33 offset:132 ; 4-byte Folded Reload
; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s24
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE32-O0-NEXT: v_readlane_b32 s5, v0, 1
; WAVE32-O0-NEXT: v_readlane_b32 s4, v0, 0
; WAVE32-O0-NEXT: ;;#ASMSTART
; WAVE32-O0-NEXT: ; use s5
; WAVE32-O0-NEXT: ;;#ASMEND
; WAVE32-O0-NEXT: s_mov_b32 s32, s4
; WAVE32-O0-NEXT: v_readlane_b32 s31, v32, 1
; WAVE32-O0-NEXT: v_readlane_b32 s30, v32, 0
; WAVE32-O0-NEXT: ; kill: killed $vgpr0
; WAVE32-O0-NEXT: s_xor_saveexec_b32 s4, -1
; WAVE32-O0-NEXT: buffer_load_dword v32, off, s[0:3], s33 offset:128 ; 4-byte Folded Reload
; WAVE32-O0-NEXT: buffer_load_dword v0, off, s[0:3], s33 offset:136 ; 4-byte Folded Reload
; WAVE32-O0-NEXT: s_mov_b32 exec_lo, s4
; WAVE32-O0-NEXT: s_add_i32 s32, s32, 0xffffee00
; WAVE32-O0-NEXT: s_mov_b32 s33, s25
; WAVE32-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE32-O0-NEXT: s_setpc_b64 s[30:31]
;
; WAVE64-O0-LABEL: func_stacksave_stackrestore_call_with_stack_objects:
; WAVE64-O0: ; %bb.0:
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; WAVE64-O0-NEXT: s_mov_b32 s19, s33
; WAVE64-O0-NEXT: s_mov_b32 s33, s32
; WAVE64-O0-NEXT: s_xor_saveexec_b64 s[16:17], -1
; WAVE64-O0-NEXT: buffer_store_dword v32, off, s[0:3], s33 offset:128 ; 4-byte Folded Spill
; WAVE64-O0-NEXT: buffer_store_dword v0, off, s[0:3], s33 offset:136 ; 4-byte Folded Spill
; WAVE64-O0-NEXT: s_mov_b64 exec, s[16:17]
; WAVE64-O0-NEXT: s_add_i32 s32, s32, 0x2400
; WAVE64-O0-NEXT: ; implicit-def: $vgpr0 : SGPR spill to VGPR lane
; WAVE64-O0-NEXT: v_writelane_b32 v32, s30, 0
; WAVE64-O0-NEXT: v_writelane_b32 v32, s31, 1
; WAVE64-O0-NEXT: s_mov_b32 s16, s32
; WAVE64-O0-NEXT: v_writelane_b32 v0, s16, 0
; WAVE64-O0-NEXT: s_lshr_b32 s16, s16, 6
; WAVE64-O0-NEXT: v_writelane_b32 v0, s16, 1
; WAVE64-O0-NEXT: s_or_saveexec_b64 s[26:27], -1
; WAVE64-O0-NEXT: buffer_store_dword v0, off, s[0:3], s33 offset:132 ; 4-byte Folded Spill
; WAVE64-O0-NEXT: s_mov_b64 exec, s[26:27]
; WAVE64-O0-NEXT: v_mov_b32_e32 v0, 42
; WAVE64-O0-NEXT: buffer_store_dword v0, off, s[0:3], s33
; WAVE64-O0-NEXT: s_waitcnt_vscnt null, 0x0
; WAVE64-O0-NEXT: s_mov_b64 s[22:23], s[2:3]
; WAVE64-O0-NEXT: s_mov_b64 s[20:21], s[0:1]
; WAVE64-O0-NEXT: s_mov_b32 s16, s32
; WAVE64-O0-NEXT: v_mov_b32_e32 v0, 17
; WAVE64-O0-NEXT: buffer_store_dword v0, off, s[0:3], s16 offset:4
; WAVE64-O0-NEXT: s_mov_b32 s18, stack_passed_argument@abs32@hi
; WAVE64-O0-NEXT: s_mov_b32 s16, stack_passed_argument@abs32@lo
; WAVE64-O0-NEXT: ; kill: def $sgpr16 killed $sgpr16 def $sgpr16_sgpr17
; WAVE64-O0-NEXT: s_mov_b32 s17, s18
; WAVE64-O0-NEXT: s_mov_b64 s[0:1], s[20:21]
; WAVE64-O0-NEXT: s_mov_b64 s[2:3], s[22:23]
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v0, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v1, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v2, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v3, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v4, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v5, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v6, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v7, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v8, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v9, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v10, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v11, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v12, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v13, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v14, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v15, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v16, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v17, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v18, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v19, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v20, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v21, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v22, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v23, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v24, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v25, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v26, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v27, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v28, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v29, s18
; WAVE64-O0-NEXT: ; implicit-def: $sgpr18
; WAVE64-O0-NEXT: v_mov_b32_e32 v30, s18
; WAVE64-O0-NEXT: s_swappc_b64 s[30:31], s[16:17]
; WAVE64-O0-NEXT: s_or_saveexec_b64 s[26:27], -1
; WAVE64-O0-NEXT: buffer_load_dword v0, off, s[0:3], s33 offset:132 ; 4-byte Folded Reload
; WAVE64-O0-NEXT: s_mov_b64 exec, s[26:27]
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE64-O0-NEXT: v_readlane_b32 s5, v0, 1
; WAVE64-O0-NEXT: v_readlane_b32 s4, v0, 0
; WAVE64-O0-NEXT: ;;#ASMSTART
; WAVE64-O0-NEXT: ; use s5
; WAVE64-O0-NEXT: ;;#ASMEND
; WAVE64-O0-NEXT: s_mov_b32 s32, s4
; WAVE64-O0-NEXT: v_readlane_b32 s31, v32, 1
; WAVE64-O0-NEXT: v_readlane_b32 s30, v32, 0
; WAVE64-O0-NEXT: ; kill: killed $vgpr0
; WAVE64-O0-NEXT: s_xor_saveexec_b64 s[4:5], -1
; WAVE64-O0-NEXT: buffer_load_dword v32, off, s[0:3], s33 offset:128 ; 4-byte Folded Reload
; WAVE64-O0-NEXT: buffer_load_dword v0, off, s[0:3], s33 offset:136 ; 4-byte Folded Reload
; WAVE64-O0-NEXT: s_mov_b64 exec, s[4:5]
; WAVE64-O0-NEXT: s_add_i32 s32, s32, 0xffffdc00
; WAVE64-O0-NEXT: s_mov_b32 s33, s19
; WAVE64-O0-NEXT: s_waitcnt vmcnt(0)
; WAVE64-O0-NEXT: s_setpc_b64 s[30:31]
  %alloca = alloca [32 x i32], addrspace(5)
  %stacksave = call ptr addrspace(5) @llvm.stacksave.p5()
  store volatile i32 42, ptr addrspace(5) %alloca
  call void @stack_passed_argument([32 x i32] poison, i32 17)
  call void asm sideeffect "; use $0", "s"(ptr addrspace(5) %stacksave)
  call void @llvm.stackrestore.p5(ptr addrspace(5) %stacksave)
  ret void
}
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; WAVE32: {{.*}}
; WAVE64: {{.*}}