1 ; RUN: llc -show-mc-encoding -mattr=+promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -march=amdgcn < %s | FileCheck -enable-var-scope -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC %s
2 ; RUN: llc -show-mc-encoding -mattr=+promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn--amdhsa -mcpu=kaveri -mattr=-unaligned-buffer-access < %s | FileCheck -enable-var-scope -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-PROMOTE %s
3 ; RUN: llc -show-mc-encoding -mattr=-promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -march=amdgcn < %s | FileCheck -enable-var-scope %s -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC
4 ; RUN: llc -show-mc-encoding -mattr=-promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -mcpu=kaveri -mattr=-unaligned-buffer-access < %s | FileCheck -enable-var-scope -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC -check-prefix=HSA-ALLOCA %s
5 ; RUN: llc -show-mc-encoding -mattr=+promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga -mattr=-unaligned-buffer-access < %s | FileCheck -enable-var-scope -check-prefix=SI-PROMOTE -check-prefix=SI -check-prefix=FUNC %s
6 ; RUN: llc -show-mc-encoding -mattr=-promote-alloca -amdgpu-load-store-vectorizer=0 -enable-amdgpu-aa=0 -verify-machineinstrs -mtriple=amdgcn-amdhsa -march=amdgcn -mcpu=tonga -mattr=-unaligned-buffer-access < %s | FileCheck -enable-var-scope -check-prefix=SI-ALLOCA -check-prefix=SI -check-prefix=FUNC %s
8 ; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -mcpu=kaveri -amdgpu-promote-alloca < %s | FileCheck -enable-var-scope -check-prefix=HSAOPT -check-prefix=OPT %s
9 ; RUN: opt -S -mtriple=amdgcn-unknown-unknown -mcpu=kaveri -amdgpu-promote-alloca < %s | FileCheck -enable-var-scope -check-prefix=NOHSAOPT -check-prefix=OPT %s
11 ; RUN: llc -march=r600 -mcpu=cypress < %s | FileCheck %s -check-prefix=R600 -check-prefix=FUNC
14 ; HSAOPT: @mova_same_clause.stack = internal unnamed_addr addrspace(3) global [256 x [5 x i32]] undef, align 4
15 ; HSAOPT: @high_alignment.stack = internal unnamed_addr addrspace(3) global [256 x [8 x i32]] undef, align 16
18 ; FUNC-LABEL: {{^}}mova_same_clause:
19 ; OPT-LABEL: @mova_same_clause(
26 ; HSA-PROMOTE: .amd_kernel_code_t
27 ; HSA-PROMOTE: workgroup_group_segment_byte_size = 5120
28 ; HSA-PROMOTE: .end_amd_kernel_code_t
30 ; HSA-PROMOTE: s_load_dword s{{[0-9]+}}, s[4:5], 0x2
32 ; SI-PROMOTE: ds_write_b32
33 ; SI-PROMOTE: ds_write_b32
34 ; SI-PROMOTE: ds_read_b32
35 ; SI-PROMOTE: ds_read_b32
37 ; HSA-ALLOCA: .amd_kernel_code_t
38 ; FIXME: Creating the emergency stack slots causes us to over-estimate scratch usage.
40 ; HSA-ALLOCA: workitem_private_segment_byte_size = 24
41 ; HSA-ALLOCA: .end_amd_kernel_code_t
43 ; HSA-ALLOCA: s_mov_b32 flat_scratch_lo, s7
44 ; HSA-ALLOCA: s_add_u32 s6, s6, s9
45 ; HSA-ALLOCA: s_lshr_b32 flat_scratch_hi, s6, 8
47 ; SI-ALLOCA: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen ; encoding: [0x00,0x10,0x70,0xe0
48 ; SI-ALLOCA: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen ; encoding: [0x00,0x10,0x70,0xe0
51 ; HSAOPT: [[DISPATCH_PTR:%[0-9]+]] = call noalias nonnull dereferenceable(64) i8 addrspace(2)* @llvm.amdgcn.dispatch.ptr()
52 ; HSAOPT: [[CAST_DISPATCH_PTR:%[0-9]+]] = bitcast i8 addrspace(2)* [[DISPATCH_PTR]] to i32 addrspace(2)*
53 ; HSAOPT: [[GEP0:%[0-9]+]] = getelementptr inbounds i32, i32 addrspace(2)* [[CAST_DISPATCH_PTR]], i64 1
54 ; HSAOPT: [[LDXY:%[0-9]+]] = load i32, i32 addrspace(2)* [[GEP0]], align 4, !invariant.load !0
55 ; HSAOPT: [[GEP1:%[0-9]+]] = getelementptr inbounds i32, i32 addrspace(2)* [[CAST_DISPATCH_PTR]], i64 2
56 ; HSAOPT: [[LDZU:%[0-9]+]] = load i32, i32 addrspace(2)* [[GEP1]], align 4, !range !1, !invariant.load !0
57 ; HSAOPT: [[EXTRACTY:%[0-9]+]] = lshr i32 [[LDXY]], 16
59 ; HSAOPT: [[WORKITEM_ID_X:%[0-9]+]] = call i32 @llvm.amdgcn.workitem.id.x(), !range !2
60 ; HSAOPT: [[WORKITEM_ID_Y:%[0-9]+]] = call i32 @llvm.amdgcn.workitem.id.y(), !range !2
61 ; HSAOPT: [[WORKITEM_ID_Z:%[0-9]+]] = call i32 @llvm.amdgcn.workitem.id.z(), !range !2
63 ; HSAOPT: [[Y_SIZE_X_Z_SIZE:%[0-9]+]] = mul nuw nsw i32 [[EXTRACTY]], [[LDZU]]
64 ; HSAOPT: [[YZ_X_XID:%[0-9]+]] = mul i32 [[Y_SIZE_X_Z_SIZE]], [[WORKITEM_ID_X]]
65 ; HSAOPT: [[Y_X_Z_SIZE:%[0-9]+]] = mul nuw nsw i32 [[WORKITEM_ID_Y]], [[LDZU]]
66 ; HSAOPT: [[ADD_YZ_X_X_YZ_SIZE:%[0-9]+]] = add i32 [[YZ_X_XID]], [[Y_X_Z_SIZE]]
67 ; HSAOPT: [[ADD_ZID:%[0-9]+]] = add i32 [[ADD_YZ_X_X_YZ_SIZE]], [[WORKITEM_ID_Z]]
69 ; HSAOPT: [[LOCAL_GEP:%[0-9]+]] = getelementptr inbounds [256 x [5 x i32]], [256 x [5 x i32]] addrspace(3)* @mova_same_clause.stack, i32 0, i32 [[ADD_ZID]]
70 ; HSAOPT: %arrayidx1 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(3)* [[LOCAL_GEP]], i32 0, i32 {{%[0-9]+}}
71 ; HSAOPT: %arrayidx3 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(3)* [[LOCAL_GEP]], i32 0, i32 {{%[0-9]+}}
72 ; HSAOPT: %arrayidx10 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(3)* [[LOCAL_GEP]], i32 0, i32 0
73 ; HSAOPT: %arrayidx12 = getelementptr inbounds [5 x i32], [5 x i32] addrspace(3)* [[LOCAL_GEP]], i32 0, i32 1
76 ; NOHSAOPT: call i32 @llvm.r600.read.local.size.y(), !range !0
77 ; NOHSAOPT: call i32 @llvm.r600.read.local.size.z(), !range !0
78 ; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.x(), !range !1
79 ; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.y(), !range !1
80 ; NOHSAOPT: call i32 @llvm.amdgcn.workitem.id.z(), !range !1
81 define amdgpu_kernel void @mova_same_clause(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
83 %stack = alloca [5 x i32], align 4
84 %0 = load i32, i32 addrspace(1)* %in, align 4
85 %arrayidx1 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %0
86 store i32 4, i32* %arrayidx1, align 4
87 %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
88 %1 = load i32, i32 addrspace(1)* %arrayidx2, align 4
89 %arrayidx3 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %1
90 store i32 5, i32* %arrayidx3, align 4
91 %arrayidx10 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 0
92 %2 = load i32, i32* %arrayidx10, align 4
93 store i32 %2, i32 addrspace(1)* %out, align 4
94 %arrayidx12 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 1
95 %3 = load i32, i32* %arrayidx12
96 %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 1
97 store i32 %3, i32 addrspace(1)* %arrayidx13
101 ; OPT-LABEL: @high_alignment(
102 ; OPT: getelementptr inbounds [256 x [8 x i32]], [256 x [8 x i32]] addrspace(3)* @high_alignment.stack, i32 0, i32 %{{[0-9]+}}
103 define amdgpu_kernel void @high_alignment(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
105 %stack = alloca [8 x i32], align 16
106 %0 = load i32, i32 addrspace(1)* %in, align 4
107 %arrayidx1 = getelementptr inbounds [8 x i32], [8 x i32]* %stack, i32 0, i32 %0
108 store i32 4, i32* %arrayidx1, align 4
109 %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
110 %1 = load i32, i32 addrspace(1)* %arrayidx2, align 4
111 %arrayidx3 = getelementptr inbounds [8 x i32], [8 x i32]* %stack, i32 0, i32 %1
112 store i32 5, i32* %arrayidx3, align 4
113 %arrayidx10 = getelementptr inbounds [8 x i32], [8 x i32]* %stack, i32 0, i32 0
114 %2 = load i32, i32* %arrayidx10, align 4
115 store i32 %2, i32 addrspace(1)* %out, align 4
116 %arrayidx12 = getelementptr inbounds [8 x i32], [8 x i32]* %stack, i32 0, i32 1
117 %3 = load i32, i32* %arrayidx12
118 %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 1
119 store i32 %3, i32 addrspace(1)* %arrayidx13
123 ; FUNC-LABEL: {{^}}no_replace_inbounds_gep:
124 ; OPT-LABEL: @no_replace_inbounds_gep(
125 ; OPT: alloca [5 x i32]
128 define amdgpu_kernel void @no_replace_inbounds_gep(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) #0 {
130 %stack = alloca [5 x i32], align 4
131 %0 = load i32, i32 addrspace(1)* %in, align 4
132 %arrayidx1 = getelementptr [5 x i32], [5 x i32]* %stack, i32 0, i32 %0
133 store i32 4, i32* %arrayidx1, align 4
134 %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
135 %1 = load i32, i32 addrspace(1)* %arrayidx2, align 4
136 %arrayidx3 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %1
137 store i32 5, i32* %arrayidx3, align 4
138 %arrayidx10 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 0
139 %2 = load i32, i32* %arrayidx10, align 4
140 store i32 %2, i32 addrspace(1)* %out, align 4
141 %arrayidx12 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 1
142 %3 = load i32, i32* %arrayidx12
143 %arrayidx13 = getelementptr inbounds i32, i32 addrspace(1)* %out, i32 1
144 store i32 %3, i32 addrspace(1)* %arrayidx13
148 ; This test checks that the stack offset is calculated correctly for structs.
149 ; All register loads/stores should be optimized away, so there shouldn't be
150 ; any MOVA instructions.
152 ; XXX: This generated code has unnecessary MOVs, we should be able to optimize
155 ; FUNC-LABEL: {{^}}multiple_structs:
156 ; OPT-LABEL: @multiple_structs(
161 %struct.point = type { i32, i32 }
163 define amdgpu_kernel void @multiple_structs(i32 addrspace(1)* %out) #0 {
165 %a = alloca %struct.point
166 %b = alloca %struct.point
167 %a.x.ptr = getelementptr %struct.point, %struct.point* %a, i32 0, i32 0
168 %a.y.ptr = getelementptr %struct.point, %struct.point* %a, i32 0, i32 1
169 %b.x.ptr = getelementptr %struct.point, %struct.point* %b, i32 0, i32 0
170 %b.y.ptr = getelementptr %struct.point, %struct.point* %b, i32 0, i32 1
171 store i32 0, i32* %a.x.ptr
172 store i32 1, i32* %a.y.ptr
173 store i32 2, i32* %b.x.ptr
174 store i32 3, i32* %b.y.ptr
175 %a.indirect.ptr = getelementptr %struct.point, %struct.point* %a, i32 0, i32 0
176 %b.indirect.ptr = getelementptr %struct.point, %struct.point* %b, i32 0, i32 0
177 %a.indirect = load i32, i32* %a.indirect.ptr
178 %b.indirect = load i32, i32* %b.indirect.ptr
179 %0 = add i32 %a.indirect, %b.indirect
180 store i32 %0, i32 addrspace(1)* %out
184 ; Test direct access of a private array inside a loop. The private array
185 ; loads and stores should be lowered to copies, so there shouldn't be any
188 ; FUNC-LABEL: {{^}}direct_loop:
192 define amdgpu_kernel void @direct_loop(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #0 {
194 %prv_array_const = alloca [2 x i32]
195 %prv_array = alloca [2 x i32]
196 %a = load i32, i32 addrspace(1)* %in
197 %b_src_ptr = getelementptr inbounds i32, i32 addrspace(1)* %in, i32 1
198 %b = load i32, i32 addrspace(1)* %b_src_ptr
199 %a_dst_ptr = getelementptr inbounds [2 x i32], [2 x i32]* %prv_array_const, i32 0, i32 0
200 store i32 %a, i32* %a_dst_ptr
201 %b_dst_ptr = getelementptr inbounds [2 x i32], [2 x i32]* %prv_array_const, i32 0, i32 1
202 store i32 %b, i32* %b_dst_ptr
206 %inc = phi i32 [0, %entry], [%count, %for.body]
207 %x_ptr = getelementptr inbounds [2 x i32], [2 x i32]* %prv_array_const, i32 0, i32 0
208 %x = load i32, i32* %x_ptr
209 %y_ptr = getelementptr inbounds [2 x i32], [2 x i32]* %prv_array, i32 0, i32 0
210 %y = load i32, i32* %y_ptr
212 store i32 %xy, i32* %y_ptr
213 %count = add i32 %inc, 1
214 %done = icmp eq i32 %count, 4095
215 br i1 %done, label %for.end, label %for.body
218 %value_ptr = getelementptr inbounds [2 x i32], [2 x i32]* %prv_array, i32 0, i32 0
219 %value = load i32, i32* %value_ptr
220 store i32 %value, i32 addrspace(1)* %out
224 ; FUNC-LABEL: {{^}}short_array:
228 ; SI-ALLOCA-DAG: buffer_store_short v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:6 ; encoding: [0x06,0x00,0x68,0xe0
229 ; SI-ALLOCA-DAG: buffer_store_short v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:4 ; encoding: [0x04,0x00,0x68,0xe0
230 ; FIXME: The loaded value is 0 or 1, so the sext could be folded to a zext, which would give buffer_load_ushort instead of buffer_load_sshort; the check below still expects buffer_load_sshort.
231 ; SI-ALLOCA: buffer_load_sshort v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}}
233 ; SI-PROMOTE: s_load_dword [[IDX:s[0-9]+]]
234 ; SI-PROMOTE: s_lshl_b32 [[SCALED_IDX:s[0-9]+]], [[IDX]], 16
235 ; SI-PROMOTE: v_bfe_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[SCALED_IDX]], 16
236 define amdgpu_kernel void @short_array(i32 addrspace(1)* %out, i32 %index) #0 {
238 %0 = alloca [2 x i16]
239 %1 = getelementptr inbounds [2 x i16], [2 x i16]* %0, i32 0, i32 0
240 %2 = getelementptr inbounds [2 x i16], [2 x i16]* %0, i32 0, i32 1
243 %3 = getelementptr inbounds [2 x i16], [2 x i16]* %0, i32 0, i32 %index
244 %4 = load i16, i16* %3
245 %5 = sext i16 %4 to i32
246 store i32 %5, i32 addrspace(1)* %out
250 ; FUNC-LABEL: {{^}}char_array:
254 ; SI-PROMOTE-DAG: buffer_store_short v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:4 ; encoding:
256 ; SI-ALLOCA-DAG: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:4 ; encoding: [0x04,0x00,0x60,0xe0
257 ; SI-ALLOCA-DAG: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:5 ; encoding: [0x05,0x00,0x60,0xe0
258 define amdgpu_kernel void @char_array(i32 addrspace(1)* %out, i32 %index) #0 {
261 %1 = getelementptr inbounds [2 x i8], [2 x i8]* %0, i32 0, i32 0
262 %2 = getelementptr inbounds [2 x i8], [2 x i8]* %0, i32 0, i32 1
265 %3 = getelementptr inbounds [2 x i8], [2 x i8]* %0, i32 0, i32 %index
267 %5 = sext i8 %4 to i32
268 store i32 %5, i32 addrspace(1)* %out
272 ; Test that two stack objects are not stored in the same register
273 ; The second stack object should be in T3.X
274 ; FUNC-LABEL: {{^}}no_overlap:
276 ; R600: [[CHAN:[XYZW]]]+
277 ; R600-NOT: [[CHAN]]+
279 ; A total of 5 bytes should be allocated and used.
280 ; SI: buffer_store_byte v{{[0-9]+}}, off, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offset:4 ;
281 define amdgpu_kernel void @no_overlap(i32 addrspace(1)* %out, i32 %in) #0 {
283 %0 = alloca [3 x i8], align 1
284 %1 = alloca [2 x i8], align 1
285 %2 = getelementptr [3 x i8], [3 x i8]* %0, i32 0, i32 0
286 %3 = getelementptr [3 x i8], [3 x i8]* %0, i32 0, i32 1
287 %4 = getelementptr [3 x i8], [3 x i8]* %0, i32 0, i32 2
288 %5 = getelementptr [2 x i8], [2 x i8]* %1, i32 0, i32 0
289 %6 = getelementptr [2 x i8], [2 x i8]* %1, i32 0, i32 1
295 %7 = getelementptr [3 x i8], [3 x i8]* %0, i32 0, i32 %in
296 %8 = getelementptr [2 x i8], [2 x i8]* %1, i32 0, i32 %in
298 %10 = load i8, i8* %8
300 %12 = sext i8 %11 to i32
301 store i32 %12, i32 addrspace(1)* %out
305 define amdgpu_kernel void @char_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
307 %alloca = alloca [2 x [2 x i8]]
308 %gep0 = getelementptr [2 x [2 x i8]], [2 x [2 x i8]]* %alloca, i32 0, i32 0, i32 0
309 %gep1 = getelementptr [2 x [2 x i8]], [2 x [2 x i8]]* %alloca, i32 0, i32 0, i32 1
310 store i8 0, i8* %gep0
311 store i8 1, i8* %gep1
312 %gep2 = getelementptr [2 x [2 x i8]], [2 x [2 x i8]]* %alloca, i32 0, i32 0, i32 %index
313 %load = load i8, i8* %gep2
314 %sext = sext i8 %load to i32
315 store i32 %sext, i32 addrspace(1)* %out
319 define amdgpu_kernel void @i32_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
321 %alloca = alloca [2 x [2 x i32]]
322 %gep0 = getelementptr [2 x [2 x i32]], [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 0
323 %gep1 = getelementptr [2 x [2 x i32]], [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 1
324 store i32 0, i32* %gep0
325 store i32 1, i32* %gep1
326 %gep2 = getelementptr [2 x [2 x i32]], [2 x [2 x i32]]* %alloca, i32 0, i32 0, i32 %index
327 %load = load i32, i32* %gep2
328 store i32 %load, i32 addrspace(1)* %out
332 define amdgpu_kernel void @i64_array_array(i64 addrspace(1)* %out, i32 %index) #0 {
334 %alloca = alloca [2 x [2 x i64]]
335 %gep0 = getelementptr [2 x [2 x i64]], [2 x [2 x i64]]* %alloca, i32 0, i32 0, i32 0
336 %gep1 = getelementptr [2 x [2 x i64]], [2 x [2 x i64]]* %alloca, i32 0, i32 0, i32 1
337 store i64 0, i64* %gep0
338 store i64 1, i64* %gep1
339 %gep2 = getelementptr [2 x [2 x i64]], [2 x [2 x i64]]* %alloca, i32 0, i32 0, i32 %index
340 %load = load i64, i64* %gep2
341 store i64 %load, i64 addrspace(1)* %out
345 %struct.pair32 = type { i32, i32 }
347 define amdgpu_kernel void @struct_array_array(i32 addrspace(1)* %out, i32 %index) #0 {
349 %alloca = alloca [2 x [2 x %struct.pair32]]
350 %gep0 = getelementptr [2 x [2 x %struct.pair32]], [2 x [2 x %struct.pair32]]* %alloca, i32 0, i32 0, i32 0, i32 1
351 %gep1 = getelementptr [2 x [2 x %struct.pair32]], [2 x [2 x %struct.pair32]]* %alloca, i32 0, i32 0, i32 1, i32 1
352 store i32 0, i32* %gep0
353 store i32 1, i32* %gep1
354 %gep2 = getelementptr [2 x [2 x %struct.pair32]], [2 x [2 x %struct.pair32]]* %alloca, i32 0, i32 0, i32 %index, i32 0
355 %load = load i32, i32* %gep2
356 store i32 %load, i32 addrspace(1)* %out
360 define amdgpu_kernel void @struct_pair32_array(i32 addrspace(1)* %out, i32 %index) #0 {
362 %alloca = alloca [2 x %struct.pair32]
363 %gep0 = getelementptr [2 x %struct.pair32], [2 x %struct.pair32]* %alloca, i32 0, i32 0, i32 1
364 %gep1 = getelementptr [2 x %struct.pair32], [2 x %struct.pair32]* %alloca, i32 0, i32 1, i32 0
365 store i32 0, i32* %gep0
366 store i32 1, i32* %gep1
367 %gep2 = getelementptr [2 x %struct.pair32], [2 x %struct.pair32]* %alloca, i32 0, i32 %index, i32 0
368 %load = load i32, i32* %gep2
369 store i32 %load, i32 addrspace(1)* %out
373 define amdgpu_kernel void @select_private(i32 addrspace(1)* %out, i32 %in) nounwind {
375 %tmp = alloca [2 x i32]
376 %tmp1 = getelementptr [2 x i32], [2 x i32]* %tmp, i32 0, i32 0
377 %tmp2 = getelementptr [2 x i32], [2 x i32]* %tmp, i32 0, i32 1
378 store i32 0, i32* %tmp1
379 store i32 1, i32* %tmp2
380 %cmp = icmp eq i32 %in, 0
381 %sel = select i1 %cmp, i32* %tmp1, i32* %tmp2
382 %load = load i32, i32* %sel
383 store i32 %load, i32 addrspace(1)* %out
387 ; AMDGPUPromoteAlloca does not know how to handle ptrtoint. When it
388 ; finds one, it should stop trying to promote.
390 ; FUNC-LABEL: {{^}}ptrtoint:
392 ; SI: buffer_store_dword v{{[0-9]+}}, v{{[0-9]+}}, s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen
393 ; SI: v_add_{{[iu]}}32_e32 [[ADD_OFFSET:v[0-9]+]], vcc, 5,
394 ; SI: buffer_load_dword v{{[0-9]+}}, [[ADD_OFFSET:v[0-9]+]], s[{{[0-9]+:[0-9]+}}], s{{[0-9]+}} offen ;
395 define amdgpu_kernel void @ptrtoint(i32 addrspace(1)* %out, i32 %a, i32 %b) #0 {
396 %alloca = alloca [16 x i32]
397 %tmp0 = getelementptr [16 x i32], [16 x i32]* %alloca, i32 0, i32 %a
398 store i32 5, i32* %tmp0
399 %tmp1 = ptrtoint [16 x i32]* %alloca to i32
400 %tmp2 = add i32 %tmp1, 5
401 %tmp3 = inttoptr i32 %tmp2 to i32*
402 %tmp4 = getelementptr i32, i32* %tmp3, i32 %b
403 %tmp5 = load i32, i32* %tmp4
404 store i32 %tmp5, i32 addrspace(1)* %out
408 ; OPT-LABEL: @pointer_typed_alloca(
409 ; OPT: getelementptr inbounds [256 x i32 addrspace(1)*], [256 x i32 addrspace(1)*] addrspace(3)* @pointer_typed_alloca.A.addr, i32 0, i32 %{{[0-9]+}}
410 ; OPT: load i32 addrspace(1)*, i32 addrspace(1)* addrspace(3)* %{{[0-9]+}}, align 4
411 define amdgpu_kernel void @pointer_typed_alloca(i32 addrspace(1)* %A) {
413 %A.addr = alloca i32 addrspace(1)*, align 4
414 store i32 addrspace(1)* %A, i32 addrspace(1)** %A.addr, align 4
415 %ld0 = load i32 addrspace(1)*, i32 addrspace(1)** %A.addr, align 4
416 %arrayidx = getelementptr inbounds i32, i32 addrspace(1)* %ld0, i32 0
417 store i32 1, i32 addrspace(1)* %arrayidx, align 4
418 %ld1 = load i32 addrspace(1)*, i32 addrspace(1)** %A.addr, align 4
419 %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %ld1, i32 1
420 store i32 2, i32 addrspace(1)* %arrayidx1, align 4
421 %ld2 = load i32 addrspace(1)*, i32 addrspace(1)** %A.addr, align 4
422 %arrayidx2 = getelementptr inbounds i32, i32 addrspace(1)* %ld2, i32 2
423 store i32 3, i32 addrspace(1)* %arrayidx2, align 4
427 ; FUNC-LABEL: {{^}}v16i32_stack:
446 ; SI: buffer_load_dword
447 ; SI: buffer_load_dword
448 ; SI: buffer_load_dword
449 ; SI: buffer_load_dword
450 ; SI: buffer_load_dword
451 ; SI: buffer_load_dword
452 ; SI: buffer_load_dword
453 ; SI: buffer_load_dword
454 ; SI: buffer_load_dword
455 ; SI: buffer_load_dword
456 ; SI: buffer_load_dword
457 ; SI: buffer_load_dword
458 ; SI: buffer_load_dword
459 ; SI: buffer_load_dword
460 ; SI: buffer_load_dword
461 ; SI: buffer_load_dword
463 define amdgpu_kernel void @v16i32_stack(<16 x i32> addrspace(1)* %out, i32 %a) {
464 %alloca = alloca [2 x <16 x i32>]
465 %tmp0 = getelementptr [2 x <16 x i32>], [2 x <16 x i32>]* %alloca, i32 0, i32 %a
466 %tmp5 = load <16 x i32>, <16 x i32>* %tmp0
467 store <16 x i32> %tmp5, <16 x i32> addrspace(1)* %out
471 ; FUNC-LABEL: {{^}}v16float_stack:
490 ; SI: buffer_load_dword
491 ; SI: buffer_load_dword
492 ; SI: buffer_load_dword
493 ; SI: buffer_load_dword
494 ; SI: buffer_load_dword
495 ; SI: buffer_load_dword
496 ; SI: buffer_load_dword
497 ; SI: buffer_load_dword
498 ; SI: buffer_load_dword
499 ; SI: buffer_load_dword
500 ; SI: buffer_load_dword
501 ; SI: buffer_load_dword
502 ; SI: buffer_load_dword
503 ; SI: buffer_load_dword
504 ; SI: buffer_load_dword
505 ; SI: buffer_load_dword
507 define amdgpu_kernel void @v16float_stack(<16 x float> addrspace(1)* %out, i32 %a) {
508 %alloca = alloca [2 x <16 x float>]
509 %tmp0 = getelementptr [2 x <16 x float>], [2 x <16 x float>]* %alloca, i32 0, i32 %a
510 %tmp5 = load <16 x float>, <16 x float>* %tmp0
511 store <16 x float> %tmp5, <16 x float> addrspace(1)* %out
515 ; FUNC-LABEL: {{^}}v2float_stack:
520 ; SI: buffer_load_dword
521 ; SI: buffer_load_dword
523 define amdgpu_kernel void @v2float_stack(<2 x float> addrspace(1)* %out, i32 %a) {
524 %alloca = alloca [16 x <2 x float>]
525 %tmp0 = getelementptr [16 x <2 x float>], [16 x <2 x float>]* %alloca, i32 0, i32 %a
526 %tmp5 = load <2 x float>, <2 x float>* %tmp0
527 store <2 x float> %tmp5, <2 x float> addrspace(1)* %out
531 ; OPT-LABEL: @direct_alloca_read_0xi32(
532 ; OPT: store [0 x i32] undef, [0 x i32] addrspace(3)*
533 ; OPT: load [0 x i32], [0 x i32] addrspace(3)*
534 define amdgpu_kernel void @direct_alloca_read_0xi32([0 x i32] addrspace(1)* %out, i32 %index) {
536 %tmp = alloca [0 x i32]
537 store [0 x i32] [], [0 x i32]* %tmp
538 %load = load [0 x i32], [0 x i32]* %tmp
539 store [0 x i32] %load, [0 x i32] addrspace(1)* %out
543 ; OPT-LABEL: @direct_alloca_read_1xi32(
544 ; OPT: store [1 x i32] zeroinitializer, [1 x i32] addrspace(3)*
545 ; OPT: load [1 x i32], [1 x i32] addrspace(3)*
546 define amdgpu_kernel void @direct_alloca_read_1xi32([1 x i32] addrspace(1)* %out, i32 %index) {
548 %tmp = alloca [1 x i32]
549 store [1 x i32] [i32 0], [1 x i32]* %tmp
550 %load = load [1 x i32], [1 x i32]* %tmp
551 store [1 x i32] %load, [1 x i32] addrspace(1)* %out
555 attributes #0 = { nounwind "amdgpu-waves-per-eu"="1,2" }
558 ; HSAOPT: !1 = !{i32 0, i32 257}
559 ; HSAOPT: !2 = !{i32 0, i32 256}
561 ; NOHSAOPT: !0 = !{i32 0, i32 257}
562 ; NOHSAOPT: !1 = !{i32 0, i32 256}