; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals all --version 4
; RUN: opt < %s -passes=amdgpu-sw-lower-lds -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s

@lds_1 = internal addrspace(3) global [1 x i32] poison, align 4
@lds_2 = internal addrspace(3) global [1 x i32] poison, align 4

; Test to check if atomicrmw and store accesses to static LDS in the kernel are lowered correctly.
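; The CHECK lines below verify the expected lowering: the LDS globals are replaced
; by a software LDS base pointer (@llvm.amdgcn.sw.lds.atomicrmw_kernel) plus a
; metadata table recording each global's offset and size. Work-item (0,0,0)
; allocates the backing global-memory buffer with __asan_malloc_impl and poisons
; the redzones, all work-items synchronize on a barrier, each atomicrmw/store is
; redirected into the buffer behind an ASan shadow-memory check, and the
; allocating work-item frees the buffer with __asan_free_impl at the end.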

; CHECK: @llvm.amdgcn.sw.lds.atomicrmw_kernel = internal addrspace(3) global ptr poison, no_sanitize_address, align 4, !absolute_symbol [[META0:![0-9]+]]
; CHECK: @llvm.amdgcn.sw.lds.atomicrmw_kernel.md = internal addrspace(1) global %llvm.amdgcn.sw.lds.atomicrmw_kernel.md.type { %llvm.amdgcn.sw.lds.atomicrmw_kernel.md.item { i32 0, i32 8, i32 32 }, %llvm.amdgcn.sw.lds.atomicrmw_kernel.md.item { i32 32, i32 4, i32 32 }, %llvm.amdgcn.sw.lds.atomicrmw_kernel.md.item { i32 64, i32 4, i32 32 } }, no_sanitize_address

define amdgpu_kernel void @atomicrmw_kernel(ptr addrspace(1) %arg0) sanitize_address {
; CHECK-LABEL: define amdgpu_kernel void @atomicrmw_kernel(
; CHECK-SAME: ptr addrspace(1) [[ARG0:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: WId:
; CHECK-NEXT: [[TMP0:%.*]] = call i32 @llvm.amdgcn.workitem.id.x()
; CHECK-NEXT: [[TMP26:%.*]] = call i32 @llvm.amdgcn.workitem.id.y()
; CHECK-NEXT: [[TMP45:%.*]] = call i32 @llvm.amdgcn.workitem.id.z()
; CHECK-NEXT: [[TMP64:%.*]] = or i32 [[TMP0]], [[TMP26]]
; CHECK-NEXT: [[TMP65:%.*]] = or i32 [[TMP64]], [[TMP45]]
; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i32 [[TMP65]], 0
; CHECK-NEXT: br i1 [[TMP5]], label [[MALLOC:%.*]], label [[TMP20:%.*]]
; CHECK: Malloc:
; CHECK-NEXT: [[TMP6:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_ATOMICRMW_KERNEL_MD_TYPE:%.*]], ptr addrspace(1) @llvm.amdgcn.sw.lds.atomicrmw_kernel.md, i32 0, i32 2, i32 0), align 4
; CHECK-NEXT: [[TMP7:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_ATOMICRMW_KERNEL_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.atomicrmw_kernel.md, i32 0, i32 2, i32 2), align 4
; CHECK-NEXT: [[TMP8:%.*]] = add i32 [[TMP6]], [[TMP7]]
; CHECK-NEXT: [[TMP9:%.*]] = zext i32 [[TMP8]] to i64
; CHECK-NEXT: [[TMP10:%.*]] = call ptr @llvm.returnaddress(i32 0)
; CHECK-NEXT: [[TMP11:%.*]] = ptrtoint ptr [[TMP10]] to i64
; CHECK-NEXT: [[TMP12:%.*]] = call i64 @__asan_malloc_impl(i64 [[TMP9]], i64 [[TMP11]])
; CHECK-NEXT: [[TMP13:%.*]] = inttoptr i64 [[TMP12]] to ptr addrspace(1)
; CHECK-NEXT: store ptr addrspace(1) [[TMP13]], ptr addrspace(3) @llvm.amdgcn.sw.lds.atomicrmw_kernel, align 8
; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP13]], i64 8
; CHECK-NEXT: [[TMP15:%.*]] = ptrtoint ptr addrspace(1) [[TMP14]] to i64
; CHECK-NEXT: call void @__asan_poison_region(i64 [[TMP15]], i64 24)
; CHECK-NEXT: [[TMP16:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP13]], i64 36
; CHECK-NEXT: [[TMP17:%.*]] = ptrtoint ptr addrspace(1) [[TMP16]] to i64
; CHECK-NEXT: call void @__asan_poison_region(i64 [[TMP17]], i64 28)
; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP13]], i64 68
; CHECK-NEXT: [[TMP19:%.*]] = ptrtoint ptr addrspace(1) [[TMP18]] to i64
; CHECK-NEXT: call void @__asan_poison_region(i64 [[TMP19]], i64 28)
; CHECK-NEXT: br label [[TMP20]]
; CHECK: 20:
; CHECK-NEXT: [[XYZCOND:%.*]] = phi i1 [ false, [[WID:%.*]] ], [ true, [[MALLOC]] ]
; CHECK-NEXT: call void @llvm.amdgcn.s.barrier()
; CHECK-NEXT: [[TMP21:%.*]] = load ptr addrspace(1), ptr addrspace(3) @llvm.amdgcn.sw.lds.atomicrmw_kernel, align 8
; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_ATOMICRMW_KERNEL_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.atomicrmw_kernel.md, i32 0, i32 1, i32 0), align 4
; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.atomicrmw_kernel, i32 [[TMP22]]
; CHECK-NEXT: [[TMP24:%.*]] = load i32, ptr addrspace(1) getelementptr inbounds ([[LLVM_AMDGCN_SW_LDS_ATOMICRMW_KERNEL_MD_TYPE]], ptr addrspace(1) @llvm.amdgcn.sw.lds.atomicrmw_kernel.md, i32 0, i32 2, i32 0), align 4
; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds i8, ptr addrspace(3) @llvm.amdgcn.sw.lds.atomicrmw_kernel, i32 [[TMP24]]
; CHECK-NEXT: [[TMP1:%.*]] = load volatile i32, ptr addrspace(1) [[ARG0]], align 4
; CHECK-NEXT: [[TMP27:%.*]] = ptrtoint ptr addrspace(3) [[TMP23]] to i32
; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP21]], i32 [[TMP27]]
; CHECK-NEXT: [[TMP29:%.*]] = ptrtoint ptr addrspace(1) [[TMP28]] to i64
; CHECK-NEXT: [[TMP35:%.*]] = add i64 [[TMP29]], 3
; CHECK-NEXT: [[TMP98:%.*]] = inttoptr i64 [[TMP35]] to ptr addrspace(1)
; CHECK-NEXT: [[TMP99:%.*]] = ptrtoint ptr addrspace(1) [[TMP28]] to i64
; CHECK-NEXT: [[TMP30:%.*]] = lshr i64 [[TMP99]], 3
; CHECK-NEXT: [[TMP31:%.*]] = add i64 [[TMP30]], 2147450880
; CHECK-NEXT: [[TMP32:%.*]] = inttoptr i64 [[TMP31]] to ptr
; CHECK-NEXT: [[TMP33:%.*]] = load i8, ptr [[TMP32]], align 1
; CHECK-NEXT: [[TMP34:%.*]] = icmp ne i8 [[TMP33]], 0
; CHECK-NEXT: [[TMP36:%.*]] = and i64 [[TMP99]], 7
; CHECK-NEXT: [[TMP37:%.*]] = trunc i64 [[TMP36]] to i8
; CHECK-NEXT: [[TMP38:%.*]] = icmp sge i8 [[TMP37]], [[TMP33]]
; CHECK-NEXT: [[TMP39:%.*]] = and i1 [[TMP34]], [[TMP38]]
; CHECK-NEXT: [[TMP40:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP39]])
; CHECK-NEXT: [[TMP41:%.*]] = icmp ne i64 [[TMP40]], 0
; CHECK-NEXT: br i1 [[TMP41]], label [[ASAN_REPORT:%.*]], label [[TMP44:%.*]], !prof [[PROF2:![0-9]+]]
; CHECK: asan.report:
; CHECK-NEXT: br i1 [[TMP39]], label [[TMP42:%.*]], label [[TMP43:%.*]]
; CHECK: 42:
; CHECK-NEXT: call void @__asan_report_store1(i64 [[TMP99]]) #[[ATTR6:[0-9]+]]
; CHECK-NEXT: call void @llvm.amdgcn.unreachable()
; CHECK-NEXT: br label [[TMP43]]
; CHECK: 43:
; CHECK-NEXT: br label [[TMP44]]
; CHECK: 44:
; CHECK-NEXT: [[TMP100:%.*]] = ptrtoint ptr addrspace(1) [[TMP98]] to i64
; CHECK-NEXT: [[TMP101:%.*]] = lshr i64 [[TMP100]], 3
; CHECK-NEXT: [[TMP102:%.*]] = add i64 [[TMP101]], 2147450880
; CHECK-NEXT: [[TMP103:%.*]] = inttoptr i64 [[TMP102]] to ptr
; CHECK-NEXT: [[TMP104:%.*]] = load i8, ptr [[TMP103]], align 1
; CHECK-NEXT: [[TMP105:%.*]] = icmp ne i8 [[TMP104]], 0
; CHECK-NEXT: [[TMP106:%.*]] = and i64 [[TMP100]], 7
; CHECK-NEXT: [[TMP54:%.*]] = trunc i64 [[TMP106]] to i8
; CHECK-NEXT: [[TMP107:%.*]] = icmp sge i8 [[TMP54]], [[TMP104]]
; CHECK-NEXT: [[TMP108:%.*]] = and i1 [[TMP105]], [[TMP107]]
; CHECK-NEXT: [[TMP109:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP108]])
; CHECK-NEXT: [[TMP110:%.*]] = icmp ne i64 [[TMP109]], 0
; CHECK-NEXT: br i1 [[TMP110]], label [[ASAN_REPORT1:%.*]], label [[TMP111:%.*]], !prof [[PROF2]]
; CHECK: asan.report1:
; CHECK-NEXT: br i1 [[TMP108]], label [[TMP112:%.*]], label [[TMP113:%.*]]
; CHECK: 112:
; CHECK-NEXT: call void @__asan_report_store1(i64 [[TMP100]]) #[[ATTR6]]
; CHECK-NEXT: call void @llvm.amdgcn.unreachable()
; CHECK-NEXT: br label [[TMP113]]
; CHECK: 113:
; CHECK-NEXT: br label [[TMP111]]
; CHECK: 111:
; CHECK-NEXT: [[TMP2:%.*]] = atomicrmw umin ptr addrspace(1) [[TMP28]], i32 [[TMP1]] seq_cst, align 4
; CHECK-NEXT: [[TMP46:%.*]] = ptrtoint ptr addrspace(3) [[TMP23]] to i32
; CHECK-NEXT: [[TMP47:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP21]], i32 [[TMP46]]
; CHECK-NEXT: [[TMP48:%.*]] = ptrtoint ptr addrspace(1) [[TMP47]] to i64
; CHECK-NEXT: [[TMP114:%.*]] = add i64 [[TMP48]], 3
; CHECK-NEXT: [[TMP115:%.*]] = inttoptr i64 [[TMP114]] to ptr addrspace(1)
; CHECK-NEXT: [[TMP116:%.*]] = ptrtoint ptr addrspace(1) [[TMP47]] to i64
; CHECK-NEXT: [[TMP49:%.*]] = lshr i64 [[TMP116]], 3
; CHECK-NEXT: [[TMP50:%.*]] = add i64 [[TMP49]], 2147450880
; CHECK-NEXT: [[TMP51:%.*]] = inttoptr i64 [[TMP50]] to ptr
; CHECK-NEXT: [[TMP52:%.*]] = load i8, ptr [[TMP51]], align 1
; CHECK-NEXT: [[TMP53:%.*]] = icmp ne i8 [[TMP52]], 0
; CHECK-NEXT: [[TMP55:%.*]] = and i64 [[TMP116]], 7
; CHECK-NEXT: [[TMP56:%.*]] = trunc i64 [[TMP55]] to i8
; CHECK-NEXT: [[TMP57:%.*]] = icmp sge i8 [[TMP56]], [[TMP52]]
; CHECK-NEXT: [[TMP58:%.*]] = and i1 [[TMP53]], [[TMP57]]
; CHECK-NEXT: [[TMP59:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP58]])
; CHECK-NEXT: [[TMP60:%.*]] = icmp ne i64 [[TMP59]], 0
; CHECK-NEXT: br i1 [[TMP60]], label [[ASAN_REPORT2:%.*]], label [[TMP63:%.*]], !prof [[PROF2]]
; CHECK: asan.report2:
; CHECK-NEXT: br i1 [[TMP58]], label [[TMP61:%.*]], label [[TMP62:%.*]]
; CHECK: 61:
; CHECK-NEXT: call void @__asan_report_store1(i64 [[TMP116]]) #[[ATTR6]]
; CHECK-NEXT: call void @llvm.amdgcn.unreachable()
; CHECK-NEXT: br label [[TMP62]]
; CHECK: 62:
; CHECK-NEXT: br label [[TMP63]]
; CHECK: 63:
; CHECK-NEXT: [[TMP117:%.*]] = ptrtoint ptr addrspace(1) [[TMP115]] to i64
; CHECK-NEXT: [[TMP118:%.*]] = lshr i64 [[TMP117]], 3
; CHECK-NEXT: [[TMP119:%.*]] = add i64 [[TMP118]], 2147450880
; CHECK-NEXT: [[TMP120:%.*]] = inttoptr i64 [[TMP119]] to ptr
; CHECK-NEXT: [[TMP87:%.*]] = load i8, ptr [[TMP120]], align 1
; CHECK-NEXT: [[TMP88:%.*]] = icmp ne i8 [[TMP87]], 0
; CHECK-NEXT: [[TMP89:%.*]] = and i64 [[TMP117]], 7
; CHECK-NEXT: [[TMP90:%.*]] = trunc i64 [[TMP89]] to i8
; CHECK-NEXT: [[TMP91:%.*]] = icmp sge i8 [[TMP90]], [[TMP87]]
; CHECK-NEXT: [[TMP92:%.*]] = and i1 [[TMP88]], [[TMP91]]
; CHECK-NEXT: [[TMP93:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP92]])
; CHECK-NEXT: [[TMP94:%.*]] = icmp ne i64 [[TMP93]], 0
; CHECK-NEXT: br i1 [[TMP94]], label [[ASAN_REPORT3:%.*]], label [[TMP97:%.*]], !prof [[PROF2]]
; CHECK: asan.report3:
; CHECK-NEXT: br i1 [[TMP92]], label [[TMP95:%.*]], label [[TMP96:%.*]]
; CHECK: 95:
; CHECK-NEXT: call void @__asan_report_store1(i64 [[TMP117]]) #[[ATTR6]]
; CHECK-NEXT: call void @llvm.amdgcn.unreachable()
; CHECK-NEXT: br label [[TMP96]]
; CHECK: 96:
; CHECK-NEXT: br label [[TMP97]]
; CHECK: 97:
; CHECK-NEXT: [[TMP3:%.*]] = atomicrmw umax ptr addrspace(1) [[TMP47]], i32 [[TMP1]] seq_cst, align 4
; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[TMP2]], [[TMP3]]
; CHECK-NEXT: [[TMP66:%.*]] = ptrtoint ptr addrspace(3) [[TMP25]] to i32
; CHECK-NEXT: [[TMP67:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[TMP21]], i32 [[TMP66]]
; CHECK-NEXT: [[TMP68:%.*]] = ptrtoint ptr addrspace(1) [[TMP67]] to i64
; CHECK-NEXT: [[TMP69:%.*]] = lshr i64 [[TMP68]], 3
; CHECK-NEXT: [[TMP70:%.*]] = add i64 [[TMP69]], 2147450880
; CHECK-NEXT: [[TMP71:%.*]] = inttoptr i64 [[TMP70]] to ptr
; CHECK-NEXT: [[TMP72:%.*]] = load i8, ptr [[TMP71]], align 1
; CHECK-NEXT: [[TMP73:%.*]] = icmp ne i8 [[TMP72]], 0
; CHECK-NEXT: [[TMP74:%.*]] = and i64 [[TMP68]], 7
; CHECK-NEXT: [[TMP75:%.*]] = add i64 [[TMP74]], 3
; CHECK-NEXT: [[TMP76:%.*]] = trunc i64 [[TMP75]] to i8
; CHECK-NEXT: [[TMP77:%.*]] = icmp sge i8 [[TMP76]], [[TMP72]]
; CHECK-NEXT: [[TMP78:%.*]] = and i1 [[TMP73]], [[TMP77]]
; CHECK-NEXT: [[TMP79:%.*]] = call i64 @llvm.amdgcn.ballot.i64(i1 [[TMP78]])
; CHECK-NEXT: [[TMP80:%.*]] = icmp ne i64 [[TMP79]], 0
; CHECK-NEXT: br i1 [[TMP80]], label [[ASAN_REPORT4:%.*]], label [[TMP83:%.*]], !prof [[PROF2]]
; CHECK: asan.report4:
; CHECK-NEXT: br i1 [[TMP78]], label [[TMP81:%.*]], label [[TMP82:%.*]]
; CHECK: 81:
; CHECK-NEXT: call void @__asan_report_store4(i64 [[TMP68]]) #[[ATTR6]]
; CHECK-NEXT: call void @llvm.amdgcn.unreachable()
; CHECK-NEXT: br label [[TMP82]]
; CHECK: 82:
; CHECK-NEXT: br label [[TMP83]]
; CHECK: 83:
; CHECK-NEXT: store i32 [[TMP4]], ptr addrspace(1) [[TMP67]], align 4
; CHECK-NEXT: br label [[CONDFREE:%.*]]
; CHECK: CondFree:
; CHECK-NEXT: call void @llvm.amdgcn.s.barrier()
; CHECK-NEXT: br i1 [[XYZCOND]], label [[FREE:%.*]], label [[END:%.*]]
; CHECK: Free:
; CHECK-NEXT: [[TMP84:%.*]] = call ptr @llvm.returnaddress(i32 0)
; CHECK-NEXT: [[TMP85:%.*]] = ptrtoint ptr [[TMP84]] to i64
; CHECK-NEXT: [[TMP86:%.*]] = ptrtoint ptr addrspace(1) [[TMP21]] to i64
; CHECK-NEXT: call void @__asan_free_impl(i64 [[TMP86]], i64 [[TMP85]])
; CHECK-NEXT: br label [[END]]
; CHECK: End:
; CHECK-NEXT: ret void
;
  %1 = load volatile i32, ptr addrspace(1) %arg0
  %2 = atomicrmw umin ptr addrspace(3) @lds_1, i32 %1 seq_cst
  %3 = atomicrmw umax ptr addrspace(3) @lds_1, i32 %1 seq_cst
  %4 = add i32 %2, %3
  store i32 %4, ptr addrspace(3) @lds_2, align 4
  ret void
}

!llvm.module.flags = !{!0}
!0 = !{i32 4, !"nosanitize_address", i32 1}

; CHECK: attributes #[[ATTR0]] = { sanitize_address "amdgpu-lds-size"="8" }
; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
; CHECK: attributes #[[ATTR2:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
; CHECK: attributes #[[ATTR3:[0-9]+]] = { convergent nocallback nofree nounwind willreturn }
; CHECK: attributes #[[ATTR4:[0-9]+]] = { convergent nocallback nofree nounwind willreturn memory(none) }
; CHECK: attributes #[[ATTR5:[0-9]+]] = { convergent nocallback nofree nounwind }
; CHECK: attributes #[[ATTR6]] = { nomerge }
; CHECK: [[META0]] = !{i32 0, i32 1}
; CHECK: [[META1:![0-9]+]] = !{i32 4, !"nosanitize_address", i32 1}
; CHECK: [[PROF2]] = !{!"branch_weights", i32 1, i32 1048575}