; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; Check that we properly realign the stack. While 4-byte access is all
; that is ever needed, some transformations rely on the known bits from
; the alignment of the pointer (e.g. the known-zero low bits).

; 128 byte object
; 4 byte emergency stack slot
; = 144 bytes with padding between them
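; (Frame size, worked out for reference: the [8 x <4 x i32>] object is
; 8 * 16 = 128 bytes at align 16, plus the 4-byte emergency spill slot;
; padding to preserve the 16-byte alignment rounds the frame up to 144
; bytes, matching the ScratchSize check below.)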

; GCN-LABEL: {{^}}needs_align16_default_stack_align:
; GCN: s_sub_u32 [[SUB:s[0-9]+]], s32, s33
; GCN-DAG: v_lshlrev_b32_e32 [[SCALED_IDX:v[0-9]+]], 4, v0
; GCN-DAG: v_lshrrev_b32_e64 [[FRAMEDIFF:v[0-9]+]], 6, [[SUB]]
; GCN: v_add_u32_e32 [[FI:v[0-9]+]], vcc, [[FRAMEDIFF]], [[SCALED_IDX]]
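
; (Addressing, for reference: SGPR stack values are wave-scaled, so
; s32 - s33 is the frame's offset from the wave scratch base in
; wave-scaled bytes; the shift right by 6 divides by the 64-lane wave
; size to get a per-lane byte offset, and the index scaled by 16 bytes
; (shl 4) is added on top. The stores then use s33, the scratch wave
; offset, as the soffset with that per-lane address in the VGPR.)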

; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s33 offen
; GCN: v_or_b32_e32 v{{[0-9]+}}, 12
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s33 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s33 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s33 offen

; GCN: ; ScratchSize: 144
define void @needs_align16_default_stack_align(i32 %idx) #0 {
  %alloca.align16 = alloca [8 x <4 x i32>], align 16, addrspace(5)
  %gep0 = getelementptr inbounds [8 x <4 x i32>], [8 x <4 x i32>] addrspace(5)* %alloca.align16, i32 0, i32 %idx
  store volatile <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(5)* %gep0, align 16
  ret void
}

; GCN-LABEL: {{^}}needs_align16_stack_align4:
; GCN: s_add_u32 [[SCRATCH_REG:s[0-9]+]], s32, 0x3c0{{$}}
; GCN: s_and_b32 s34, [[SCRATCH_REG]], 0xfffffc00
; GCN: s_add_u32 s32, s32, 0x2800{{$}}
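
; (Constants, worked out: these SGPR values are wave-scaled, i.e.
; per-lane bytes times the 64-lane wave. 0x3c0 = (16 - 1) * 64 plus the
; 0xfffffc00 mask round the frame pointer up to a 16-byte per-lane
; boundary; 0x2800 = 160 * 64 matches the ScratchSize of 160 below.)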

; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s33 offen
; GCN: v_or_b32_e32 v{{[0-9]+}}, 12
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s33 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s33 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s33 offen

; GCN: s_sub_u32 s32, s32, 0x2800

; GCN: ; ScratchSize: 160
define void @needs_align16_stack_align4(i32 %idx) #2 {
  %alloca.align16 = alloca [8 x <4 x i32>], align 16, addrspace(5)
  %gep0 = getelementptr inbounds [8 x <4 x i32>], [8 x <4 x i32>] addrspace(5)* %alloca.align16, i32 0, i32 %idx
  store volatile <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(5)* %gep0, align 16
  ret void
}

; GCN-LABEL: {{^}}needs_align32:
; GCN: s_add_u32 [[SCRATCH_REG:s[0-9]+]], s32, 0x7c0{{$}}
; GCN: s_and_b32 s34, [[SCRATCH_REG]], 0xfffff800
; GCN: s_add_u32 s32, s32, 0x3000{{$}}
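
; (Same wave-scaled pattern: 0x7c0 = (32 - 1) * 64 plus the 0xfffff800
; mask realign to a 32-byte per-lane boundary, and 0x3000 = 192 * 64
; matches the ScratchSize of 192 below.)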

; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s33 offen
; GCN: v_or_b32_e32 v{{[0-9]+}}, 12
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s33 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s33 offen
; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s33 offen

; GCN: s_sub_u32 s32, s32, 0x3000

; GCN: ; ScratchSize: 192
define void @needs_align32(i32 %idx) #0 {
  %alloca.align32 = alloca [8 x <4 x i32>], align 32, addrspace(5)
  %gep0 = getelementptr inbounds [8 x <4 x i32>], [8 x <4 x i32>] addrspace(5)* %alloca.align32, i32 0, i32 %idx
  store volatile <4 x i32> <i32 1, i32 2, i32 3, i32 4>, <4 x i32> addrspace(5)* %gep0, align 32
  ret void
}

; GCN-LABEL: {{^}}force_realign4:
; GCN: s_add_u32 [[SCRATCH_REG:s[0-9]+]], s32, 0xc0{{$}}
; GCN: s_and_b32 s34, [[SCRATCH_REG]], 0xffffff00
; GCN: s_add_u32 s32, s32, 0xd00{{$}}
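
; (Here 0xc0 = (4 - 1) * 64 plus the 0xffffff00 mask realign to a
; 4-byte per-lane boundary, and 0xd00 = 52 * 64 matches the
; ScratchSize of 52 below.)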

; GCN: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[0:3], s33 offen
; GCN: s_sub_u32 s32, s32, 0xd00

; GCN: ; ScratchSize: 52
define void @force_realign4(i32 %idx) #1 {
  %alloca.align4 = alloca [8 x i32], align 4, addrspace(5)
  %gep0 = getelementptr inbounds [8 x i32], [8 x i32] addrspace(5)* %alloca.align4, i32 0, i32 %idx
  store volatile i32 3, i32 addrspace(5)* %gep0, align 4
  ret void
}

; GCN-LABEL: {{^}}kernel_call_align16_from_8:
; GCN: s_mov_b32 s33, s7{{$}}
; GCN-NEXT: s_add_u32 s32, s33, 0x400{{$}}
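
; (The kernel copies its incoming scratch wave offset, here s7, into
; s33 and then initializes the stack pointer past its own objects:
; 0x400 = 16 * 64 wave-scaled bytes, i.e. the 4-byte alloca rounded up
; to the callee's 16-byte stack alignment.)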

define amdgpu_kernel void @kernel_call_align16_from_8() #0 {
  %alloca = alloca i32, align 4, addrspace(5)
  store volatile i32 2, i32 addrspace(5)* %alloca
  call void @needs_align16_default_stack_align(i32 1)
  ret void
}

; The call sequence should keep the stack aligned to 16 at the call
; GCN-LABEL: {{^}}kernel_call_align16_from_5:
; GCN: s_mov_b32 s33, s7{{$}}
; GCN-NEXT: s_add_u32 s32, s33, 0x400

define amdgpu_kernel void @kernel_call_align16_from_5() {
  %alloca0 = alloca i8, align 1, addrspace(5)
  store volatile i8 2, i8 addrspace(5)* %alloca0

  call void @needs_align16_default_stack_align(i32 1)
  ret void
}

; The call sequence should keep the stack aligned to 4 at the call
; GCN-LABEL: {{^}}kernel_call_align4_from_5:
; GCN: s_mov_b32 s33, s7{{$}}
; GCN: s_add_u32 s32, s33, 0x400

define amdgpu_kernel void @kernel_call_align4_from_5() {
  %alloca0 = alloca i8, align 1, addrspace(5)
  store volatile i8 2, i8 addrspace(5)* %alloca0

  call void @needs_align16_stack_align4(i32 1)
  ret void
}

; GCN-LABEL: {{^}}default_realign_align128:
; GCN: s_add_u32 [[TMP:s[0-9]+]], s32, 0x1fc0
; GCN-NEXT: s_mov_b32 [[FP_COPY:s[0-9]+]], s34
; GCN-NEXT: s_and_b32 s34, [[TMP]], 0xffffe000
; GCN-NEXT: s_add_u32 s32, s32, 0x4000
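
; (Again wave-scaled: 0x1fc0 = (128 - 1) * 64 plus the 0xffffe000 mask
; realign the frame pointer to a 128-byte per-lane boundary, and
; 0x4000 = 256 * 64 reserves a 256-byte per-lane frame. The old frame
; pointer is saved in a copy and restored below.)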

; GCN: buffer_store_dword v0, off, s[0:3], s34{{$}}
; GCN: s_sub_u32 s32, s32, 0x4000
; GCN: s_mov_b32 s34, [[FP_COPY]]
define void @default_realign_align128(i32 %idx) #0 {
  %alloca.align = alloca i32, align 128, addrspace(5)
  store volatile i32 9, i32 addrspace(5)* %alloca.align, align 128
  ret void
}
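
; With the "no-realign-stack" attribute, no realignment code is
; emitted for the align 128 request; the object is accessed directly
; off the stack pointer with no frame setup.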
; GCN-LABEL: {{^}}disable_realign_align128:
; GCN: buffer_store_dword v0, off, s[0:3], s32{{$}}
define void @disable_realign_align128(i32 %idx) #3 {
  %alloca.align = alloca i32, align 128, addrspace(5)
  store volatile i32 9, i32 addrspace(5)* %alloca.align, align 128
  ret void
}

attributes #0 = { noinline nounwind }
attributes #1 = { noinline nounwind "stackrealign" }
attributes #2 = { noinline nounwind alignstack=4 }
attributes #3 = { noinline nounwind "no-realign-stack" }