; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI,CIVI %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,VI,CIVI %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9 %s
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -stop-before=machine-scheduler < %s | FileCheck -enable-var-scope -check-prefixes=MIR %s
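
; Tests instruction selection for the llvm.amdgcn.atomic.inc intrinsic with
; i32 and i64 operands on local (LDS), global, and flat pointers, covering the
; CI, VI, and GFX9 subtargets. The MIR run line additionally checks that the
; !noalias metadata survives onto the DS_INC_RTN_U32 machine memory operand.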
declare i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* nocapture, i32, i32, i32, i1) #2
declare i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* nocapture, i32, i32, i32, i1) #2
declare i32 @llvm.amdgcn.atomic.inc.i32.p0i32(i32* nocapture, i32, i32, i32, i1) #2

declare i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* nocapture, i64, i32, i32, i1) #2
declare i64 @llvm.amdgcn.atomic.inc.i64.p3i64(i64 addrspace(3)* nocapture, i64, i32, i32, i1) #2
declare i64 @llvm.amdgcn.atomic.inc.i64.p0i64(i64* nocapture, i64, i32, i32, i1) #2

declare i32 @llvm.amdgcn.workitem.id.x() #1

; GCN-LABEL: {{^}}lds_atomic_inc_ret_i32:
; CIVI-DAG: s_mov_b32 m0
; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: ds_inc_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[K]]
; MIR-LABEL: @lds_atomic_inc_ret_i32
; MIR: DS_INC_RTN_U32 {{.*}} :: (load store 4 on %{{.*}}, !noalias !{{[0-9]+}}, addrspace 3)
define amdgpu_kernel void @lds_atomic_inc_ret_i32(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) #0 {
  %result = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %ptr, i32 42, i32 0, i32 0, i1 false), !noalias !0
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

!0 = distinct !{!0, !"noalias-scope"}

; GCN-LABEL: {{^}}lds_atomic_inc_ret_i32_offset:
; CIVI-DAG: s_mov_b32 m0
; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: ds_inc_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[K]] offset:16
define amdgpu_kernel void @lds_atomic_inc_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) #0 {
  %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
  %result = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %gep, i32 42, i32 0, i32 0, i1 false)
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_inc_noret_i32:
; CIVI-DAG: s_mov_b32 m0
; GCN-DAG: s_load_dword [[SPTR:s[0-9]+]],
; GCN-DAG: v_mov_b32_e32 [[DATA:v[0-9]+]], 4
; GCN-DAG: v_mov_b32_e32 [[VPTR:v[0-9]+]], [[SPTR]]
; GCN: ds_inc_u32 [[VPTR]], [[DATA]]
define amdgpu_kernel void @lds_atomic_inc_noret_i32(i32 addrspace(3)* %ptr) nounwind {
  %result = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %ptr, i32 42, i32 0, i32 0, i1 false)
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_inc_noret_i32_offset:
; CIVI-DAG: s_mov_b32 m0
; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: ds_inc_u32 v{{[0-9]+}}, [[K]] offset:16
define amdgpu_kernel void @lds_atomic_inc_noret_i32_offset(i32 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
  %result = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %gep, i32 42, i32 0, i32 0, i1 false)
  ret void
}

; GCN-LABEL: {{^}}global_atomic_inc_ret_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; CIVI: buffer_atomic_inc [[K]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 glc{{$}}
; GFX9: global_atomic_inc v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]], off glc{{$}}
define amdgpu_kernel void @global_atomic_inc_ret_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
  %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %ptr, i32 42, i32 0, i32 0, i1 false)
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_atomic_inc_ret_i32_offset:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; CIVI: buffer_atomic_inc [[K]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:16 glc{{$}}
; GFX9: global_atomic_inc v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]], off offset:16 glc{{$}}
define amdgpu_kernel void @global_atomic_inc_ret_i32_offset(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
  %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %gep, i32 42, i32 0, i32 0, i1 false)
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_atomic_inc_noret_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; CIVI: buffer_atomic_inc [[K]], off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
; GFX9: global_atomic_inc v{{\[[0-9]+:[0-9]+\]}}, [[K]], off{{$}}
define amdgpu_kernel void @global_atomic_inc_noret_i32(i32 addrspace(1)* %ptr) nounwind {
  %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %ptr, i32 42, i32 0, i32 0, i1 false)
  ret void
}

; GCN-LABEL: {{^}}global_atomic_inc_noret_i32_offset:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; CIVI: buffer_atomic_inc [[K]], off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:16{{$}}
; GFX9: global_atomic_inc v{{\[[0-9]+:[0-9]+\]}}, [[K]], off offset:16{{$}}
define amdgpu_kernel void @global_atomic_inc_noret_i32_offset(i32 addrspace(1)* %ptr) nounwind {
  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
  %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %gep, i32 42, i32 0, i32 0, i1 false)
  ret void
}

; GCN-LABEL: {{^}}global_atomic_inc_ret_i32_offset_addr64:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; CI: buffer_atomic_inc [[K]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:20 glc{{$}}
; VI: flat_atomic_inc v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]] glc{{$}}
define amdgpu_kernel void @global_atomic_inc_ret_i32_offset_addr64(i32 addrspace(1)* %out, i32 addrspace(1)* %ptr) #0 {
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.tid = getelementptr i32, i32 addrspace(1)* %ptr, i32 %id
  %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %id
  %gep = getelementptr i32, i32 addrspace(1)* %gep.tid, i32 5
  %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %gep, i32 42, i32 0, i32 0, i1 false)
  store i32 %result, i32 addrspace(1)* %out.gep
  ret void
}

; GCN-LABEL: {{^}}global_atomic_inc_noret_i32_offset_addr64:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; CI: buffer_atomic_inc [[K]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:20{{$}}
; VI: flat_atomic_inc v{{\[[0-9]+:[0-9]+\]}}, [[K]]{{$}}
define amdgpu_kernel void @global_atomic_inc_noret_i32_offset_addr64(i32 addrspace(1)* %ptr) #0 {
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.tid = getelementptr i32, i32 addrspace(1)* %ptr, i32 %id
  %gep = getelementptr i32, i32 addrspace(1)* %gep.tid, i32 5
  %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %gep, i32 42, i32 0, i32 0, i1 false)
  ret void
}

@lds0 = addrspace(3) global [512 x i32] undef, align 4
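; @lds0 is used by atomic_inc_shl_base_lds_0_i32 below, which checks that the
; constant part of the LDS address computation is folded into the
; ds_inc_rtn_u32 offset field.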

; GCN-LABEL: {{^}}atomic_inc_shl_base_lds_0_i32:
; CIVI-DAG: v_lshlrev_b32_e32 [[OFS:v[0-9]+]], 2, {{v[0-9]+}}
; CIVI-DAG: v_add_{{[ui]}}32_e32 [[PTR:v[0-9]+]], vcc, lds0@abs32@lo, [[OFS]]
; GFX9-DAG: s_mov_b32 [[BASE:s[0-9]+]], lds0@abs32@lo
; GFX9-DAG: v_lshl_add_u32 [[PTR:v[0-9]+]], {{v[0-9]+}}, 2, [[BASE]]
; GCN: ds_inc_rtn_u32 {{v[0-9]+}}, [[PTR]], {{v[0-9]+}} offset:8
define amdgpu_kernel void @atomic_inc_shl_base_lds_0_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(3)* @lds0, i32 0, i32 %idx.0
  %val0 = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %arrayidx0, i32 9, i32 0, i32 0, i1 false)
  store i32 %idx.0, i32 addrspace(1)* %add_use
  store i32 %val0, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_inc_ret_i64:
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: ds_inc_rtn_u64 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}}{{$}}
define amdgpu_kernel void @lds_atomic_inc_ret_i64(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) #0 {
  %result = call i64 @llvm.amdgcn.atomic.inc.i64.p3i64(i64 addrspace(3)* %ptr, i64 42, i32 0, i32 0, i1 false)
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_inc_ret_i64_offset:
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: ds_inc_rtn_u64 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} offset:32
define amdgpu_kernel void @lds_atomic_inc_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(3)* %ptr) #0 {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = call i64 @llvm.amdgcn.atomic.inc.i64.p3i64(i64 addrspace(3)* %gep, i64 42, i32 0, i32 0, i1 false)
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_inc_noret_i64:
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: ds_inc_u64 v{{[0-9]+}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}}{{$}}
define amdgpu_kernel void @lds_atomic_inc_noret_i64(i64 addrspace(3)* %ptr) nounwind {
  %result = call i64 @llvm.amdgcn.atomic.inc.i64.p3i64(i64 addrspace(3)* %ptr, i64 42, i32 0, i32 0, i1 false)
  ret void
}

; GCN-LABEL: {{^}}lds_atomic_inc_noret_i64_offset:
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: ds_inc_u64 v{{[0-9]+}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} offset:32{{$}}
define amdgpu_kernel void @lds_atomic_inc_noret_i64_offset(i64 addrspace(3)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(3)* %ptr, i32 4
  %result = call i64 @llvm.amdgcn.atomic.inc.i64.p3i64(i64 addrspace(3)* %gep, i64 42, i32 0, i32 0, i1 false)
  ret void
}

; GCN-LABEL: {{^}}global_atomic_inc_ret_i64:
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; CIVI: buffer_atomic_inc_x2 v{{\[}}[[KLO]]:[[KHI]]{{\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 glc{{$}}
; GFX9: global_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}}, off glc{{$}}
define amdgpu_kernel void @global_atomic_inc_ret_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %ptr) #0 {
  %result = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %ptr, i64 42, i32 0, i32 0, i1 false)
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_atomic_inc_ret_i64_offset:
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; CIVI: buffer_atomic_inc_x2 v{{\[}}[[KLO]]:[[KHI]]{{\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:32 glc{{$}}
; GFX9: global_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}}, off offset:32 glc{{$}}
define amdgpu_kernel void @global_atomic_inc_ret_i64_offset(i64 addrspace(1)* %out, i64 addrspace(1)* %ptr) #0 {
  %gep = getelementptr i64, i64 addrspace(1)* %ptr, i32 4
  %result = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %gep, i64 42, i32 0, i32 0, i1 false)
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}global_atomic_inc_noret_i64:
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; CIVI: buffer_atomic_inc_x2 v{{\[}}[[KLO]]:[[KHI]]{{\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0{{$}}
; GFX9: global_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}}, off{{$}}
define amdgpu_kernel void @global_atomic_inc_noret_i64(i64 addrspace(1)* %ptr) nounwind {
  %result = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %ptr, i64 42, i32 0, i32 0, i1 false)
  ret void
}

; GCN-LABEL: {{^}}global_atomic_inc_noret_i64_offset:
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; CIVI: buffer_atomic_inc_x2 v{{\[}}[[KLO]]:[[KHI]]{{\]}}, off, s{{\[[0-9]+:[0-9]+\]}}, 0 offset:32{{$}}
; GFX9: global_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}}, off offset:32{{$}}
define amdgpu_kernel void @global_atomic_inc_noret_i64_offset(i64 addrspace(1)* %ptr) nounwind {
  %gep = getelementptr i64, i64 addrspace(1)* %ptr, i32 4
  %result = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %gep, i64 42, i32 0, i32 0, i1 false)
  ret void
}

; GCN-LABEL: {{^}}global_atomic_inc_ret_i64_offset_addr64:
; GCN: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; CI: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}}
; GCN: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; CI: buffer_atomic_inc_x2 v{{\[}}[[KLO]]:[[KHI]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:40 glc{{$}}
; VI: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} glc{{$}}
define amdgpu_kernel void @global_atomic_inc_ret_i64_offset_addr64(i64 addrspace(1)* %out, i64 addrspace(1)* %ptr) #0 {
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.tid = getelementptr i64, i64 addrspace(1)* %ptr, i32 %id
  %out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id
  %gep = getelementptr i64, i64 addrspace(1)* %gep.tid, i32 5
  %result = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %gep, i64 42, i32 0, i32 0, i1 false)
  store i64 %result, i64 addrspace(1)* %out.gep
  ret void
}

; GCN-LABEL: {{^}}global_atomic_inc_noret_i64_offset_addr64:
; GCN: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; CI: v_mov_b32_e32 v{{[0-9]+}}, 0{{$}}
; GCN: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; CI: buffer_atomic_inc_x2 v{{\[}}[[KLO]]:[[KHI]]{{\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:40{{$}}
; VI: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}}{{$}}
define amdgpu_kernel void @global_atomic_inc_noret_i64_offset_addr64(i64 addrspace(1)* %ptr) #0 {
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.tid = getelementptr i64, i64 addrspace(1)* %ptr, i32 %id
  %gep = getelementptr i64, i64 addrspace(1)* %gep.tid, i32 5
  %result = call i64 @llvm.amdgcn.atomic.inc.i64.p1i64(i64 addrspace(1)* %gep, i64 42, i32 0, i32 0, i1 false)
  ret void
}

; GCN-LABEL: {{^}}flat_atomic_inc_ret_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: flat_atomic_inc v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]] glc{{$}}
define amdgpu_kernel void @flat_atomic_inc_ret_i32(i32* %out, i32* %ptr) #0 {
  %result = call i32 @llvm.amdgcn.atomic.inc.i32.p0i32(i32* %ptr, i32 42, i32 0, i32 0, i1 false)
  store i32 %result, i32* %out
  ret void
}

; GCN-LABEL: {{^}}flat_atomic_inc_ret_i32_offset:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; CIVI: flat_atomic_inc v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]] glc{{$}}
; GFX9: flat_atomic_inc v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]] offset:16 glc{{$}}
define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset(i32* %out, i32* %ptr) #0 {
  %gep = getelementptr i32, i32* %ptr, i32 4
  %result = call i32 @llvm.amdgcn.atomic.inc.i32.p0i32(i32* %gep, i32 42, i32 0, i32 0, i1 false)
  store i32 %result, i32* %out
  ret void
}

; GCN-LABEL: {{^}}flat_atomic_inc_noret_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: flat_atomic_inc v{{\[[0-9]+:[0-9]+\]}}, [[K]]{{$}}
define amdgpu_kernel void @flat_atomic_inc_noret_i32(i32* %ptr) nounwind {
  %result = call i32 @llvm.amdgcn.atomic.inc.i32.p0i32(i32* %ptr, i32 42, i32 0, i32 0, i1 false)
  ret void
}

; GCN-LABEL: {{^}}flat_atomic_inc_noret_i32_offset:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; CIVI: flat_atomic_inc v{{\[[0-9]+:[0-9]+\]}}, [[K]]{{$}}
; GFX9: flat_atomic_inc v{{\[[0-9]+:[0-9]+\]}}, [[K]] offset:16{{$}}
define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset(i32* %ptr) nounwind {
  %gep = getelementptr i32, i32* %ptr, i32 4
  %result = call i32 @llvm.amdgcn.atomic.inc.i32.p0i32(i32* %gep, i32 42, i32 0, i32 0, i1 false)
  ret void
}

; GCN-LABEL: {{^}}flat_atomic_inc_ret_i32_offset_addr64:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; CIVI: flat_atomic_inc v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]] glc{{$}}
; GFX9: flat_atomic_inc v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, [[K]] offset:20 glc{{$}}
define amdgpu_kernel void @flat_atomic_inc_ret_i32_offset_addr64(i32* %out, i32* %ptr) #0 {
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.tid = getelementptr i32, i32* %ptr, i32 %id
  %out.gep = getelementptr i32, i32* %out, i32 %id
  %gep = getelementptr i32, i32* %gep.tid, i32 5
  %result = call i32 @llvm.amdgcn.atomic.inc.i32.p0i32(i32* %gep, i32 42, i32 0, i32 0, i1 false)
  store i32 %result, i32* %out.gep
  ret void
}

; GCN-LABEL: {{^}}flat_atomic_inc_noret_i32_offset_addr64:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; CIVI: flat_atomic_inc v{{\[[0-9]+:[0-9]+\]}}, [[K]]{{$}}
; GFX9: flat_atomic_inc v{{\[[0-9]+:[0-9]+\]}}, [[K]] offset:20{{$}}
define amdgpu_kernel void @flat_atomic_inc_noret_i32_offset_addr64(i32* %ptr) #0 {
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.tid = getelementptr i32, i32* %ptr, i32 %id
  %gep = getelementptr i32, i32* %gep.tid, i32 5
  %result = call i32 @llvm.amdgcn.atomic.inc.i32.p0i32(i32* %gep, i32 42, i32 0, i32 0, i1 false)
  ret void
}

@lds1 = addrspace(3) global [512 x i64] undef, align 8
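; @lds1 is the i64 counterpart of @lds0, used by atomic_inc_shl_base_lds_0_i64
; below to check the same offset folding for ds_inc_rtn_u64 (offset:16).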

; GCN-LABEL: {{^}}atomic_inc_shl_base_lds_0_i64:
; CIVI-DAG: v_lshlrev_b32_e32 [[OFS:v[0-9]+]], 3, {{v[0-9]+}}
; CIVI-DAG: v_add_{{[ui]}}32_e32 [[PTR:v[0-9]+]], vcc, lds1@abs32@lo, [[OFS]]
; GFX9-DAG: v_mov_b32_e32 [[BASE:v[0-9]+]], lds1@abs32@lo
; GFX9-DAG: v_lshl_add_u32 [[PTR:v[0-9]+]], {{v[0-9]+}}, 3, [[BASE]]
; GCN: ds_inc_rtn_u64 v{{\[[0-9]+:[0-9]+\]}}, [[PTR]], v{{\[[0-9]+:[0-9]+\]}} offset:16
define amdgpu_kernel void @atomic_inc_shl_base_lds_0_i64(i64 addrspace(1)* %out, i32 addrspace(1)* %add_use) #0 {
  %tid.x = tail call i32 @llvm.amdgcn.workitem.id.x() #1
  %idx.0 = add nsw i32 %tid.x, 2
  %arrayidx0 = getelementptr inbounds [512 x i64], [512 x i64] addrspace(3)* @lds1, i32 0, i32 %idx.0
  %val0 = call i64 @llvm.amdgcn.atomic.inc.i64.p3i64(i64 addrspace(3)* %arrayidx0, i64 9, i32 0, i32 0, i1 false)
  store i32 %idx.0, i32 addrspace(1)* %add_use
  store i64 %val0, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}flat_atomic_inc_ret_i64:
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} glc{{$}}
define amdgpu_kernel void @flat_atomic_inc_ret_i64(i64* %out, i64* %ptr) #0 {
  %result = call i64 @llvm.amdgcn.atomic.inc.i64.p0i64(i64* %ptr, i64 42, i32 0, i32 0, i1 false)
  store i64 %result, i64* %out
  ret void
}

; GCN-LABEL: {{^}}flat_atomic_inc_ret_i64_offset:
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; CIVI: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} glc{{$}}
; GFX9: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} offset:32 glc{{$}}
define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset(i64* %out, i64* %ptr) #0 {
  %gep = getelementptr i64, i64* %ptr, i32 4
  %result = call i64 @llvm.amdgcn.atomic.inc.i64.p0i64(i64* %gep, i64 42, i32 0, i32 0, i1 false)
  store i64 %result, i64* %out
  ret void
}

; GCN-LABEL: {{^}}flat_atomic_inc_noret_i64:
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; GCN: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]$}}
define amdgpu_kernel void @flat_atomic_inc_noret_i64(i64* %ptr) nounwind {
  %result = call i64 @llvm.amdgcn.atomic.inc.i64.p0i64(i64* %ptr, i64 42, i32 0, i32 0, i1 false)
  ret void
}

; GCN-LABEL: {{^}}flat_atomic_inc_noret_i64_offset:
; GCN-DAG: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN-DAG: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; CIVI: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]$}}
; GFX9: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} offset:32{{$}}
define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset(i64* %ptr) nounwind {
  %gep = getelementptr i64, i64* %ptr, i32 4
  %result = call i64 @llvm.amdgcn.atomic.inc.i64.p0i64(i64* %gep, i64 42, i32 0, i32 0, i1 false)
  ret void
}

; GCN-LABEL: {{^}}flat_atomic_inc_ret_i64_offset_addr64:
; GCN: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; CIVI: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} glc{{$}}
; GFX9: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} offset:40 glc{{$}}
define amdgpu_kernel void @flat_atomic_inc_ret_i64_offset_addr64(i64* %out, i64* %ptr) #0 {
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.tid = getelementptr i64, i64* %ptr, i32 %id
  %out.gep = getelementptr i64, i64* %out, i32 %id
  %gep = getelementptr i64, i64* %gep.tid, i32 5
  %result = call i64 @llvm.amdgcn.atomic.inc.i64.p0i64(i64* %gep, i64 42, i32 0, i32 0, i1 false)
  store i64 %result, i64* %out.gep
  ret void
}

; GCN-LABEL: {{^}}flat_atomic_inc_noret_i64_offset_addr64:
; GCN: v_mov_b32_e32 v[[KLO:[0-9]+]], 42
; GCN: v_mov_b32_e32 v[[KHI:[0-9]+]], 0{{$}}
; CIVI: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]$}}
; GFX9: flat_atomic_inc_x2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[KLO]]:[[KHI]]{{\]}} offset:40{{$}}
define amdgpu_kernel void @flat_atomic_inc_noret_i64_offset_addr64(i64* %ptr) #0 {
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %gep.tid = getelementptr i64, i64* %ptr, i32 %id
  %gep = getelementptr i64, i64* %gep.tid, i32 5
  %result = call i64 @llvm.amdgcn.atomic.inc.i64.p0i64(i64* %gep, i64 42, i32 0, i32 0, i1 false)
  ret void
}

; GCN-LABEL: {{^}}nocse_lds_atomic_inc_ret_i32:
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 42
; GCN: ds_inc_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[K]]
; GCN: ds_inc_rtn_u32 v{{[0-9]+}}, v{{[0-9]+}}, [[K]]
define amdgpu_kernel void @nocse_lds_atomic_inc_ret_i32(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(3)* %ptr) #0 {
  %result0 = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %ptr, i32 42, i32 0, i32 0, i1 false)
  %result1 = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %ptr, i32 42, i32 0, i32 0, i1 false)

  store i32 %result0, i32 addrspace(1)* %out0
  store i32 %result1, i32 addrspace(1)* %out1
  ret void
}

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind argmemonly }