1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN1 %s
3 ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN2 %s
; i64 atomic add through a flat pointer at a constant +32-byte offset
; (gep i64 4). agent-scope seq_cst lowers to flat_atomic_add_x2 bracketed
; by s_waitcnt fences plus buffer_wbinvl1_vol on both CI (GCN1) and VI (GCN2);
; the targets differ only in the kernarg load offset (0x9 vs 0x24).
5 define amdgpu_kernel void @atomic_add_i64_offset(ptr %out, i64 %in) {
6 ; GCN1-LABEL: atomic_add_i64_offset:
7 ; GCN1: ; %bb.0: ; %entry
8 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
9 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
10 ; GCN1-NEXT: s_add_u32 s0, s0, 32
11 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
12 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
13 ; GCN1-NEXT: v_mov_b32_e32 v0, s2
14 ; GCN1-NEXT: v_mov_b32_e32 v1, s3
15 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
16 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
17 ; GCN1-NEXT: flat_atomic_add_x2 v[2:3], v[0:1]
18 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
19 ; GCN1-NEXT: buffer_wbinvl1_vol
22 ; GCN2-LABEL: atomic_add_i64_offset:
23 ; GCN2: ; %bb.0: ; %entry
24 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
25 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
26 ; GCN2-NEXT: s_add_u32 s0, s0, 32
27 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
28 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
29 ; GCN2-NEXT: v_mov_b32_e32 v0, s2
30 ; GCN2-NEXT: v_mov_b32_e32 v1, s3
31 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
32 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
33 ; GCN2-NEXT: flat_atomic_add_x2 v[2:3], v[0:1]
34 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
35 ; GCN2-NEXT: buffer_wbinvl1_vol
38 %gep = getelementptr i64, ptr %out, i64 4
39 %tmp0 = atomicrmw volatile add ptr %gep, i64 %in syncscope("agent") seq_cst
; Same +32-byte i64 atomic add, but the returned (old) value is used:
; flat_atomic_add_x2 carries the glc bit and the result is written to
; %out2 with flat_store_dwordx2.
43 define amdgpu_kernel void @atomic_add_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
44 ; GCN1-LABEL: atomic_add_i64_ret_offset:
45 ; GCN1: ; %bb.0: ; %entry
46 ; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
47 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
48 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
49 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
50 ; GCN1-NEXT: s_add_u32 s0, s0, 32
51 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
52 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
53 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
54 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
55 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
56 ; GCN1-NEXT: flat_atomic_add_x2 v[0:1], v[2:3], v[0:1] glc
57 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
58 ; GCN1-NEXT: buffer_wbinvl1_vol
59 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
60 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
61 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
64 ; GCN2-LABEL: atomic_add_i64_ret_offset:
65 ; GCN2: ; %bb.0: ; %entry
66 ; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
67 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
68 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
69 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
70 ; GCN2-NEXT: s_add_u32 s0, s0, 32
71 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
72 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
73 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
74 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
75 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
76 ; GCN2-NEXT: flat_atomic_add_x2 v[0:1], v[2:3], v[0:1] glc
77 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
78 ; GCN2-NEXT: buffer_wbinvl1_vol
79 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
80 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
81 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
84 %gep = getelementptr i64, ptr %out, i64 4
85 %tmp0 = atomicrmw volatile add ptr %gep, i64 %in syncscope("agent") seq_cst
86 store i64 %tmp0, ptr %out2
; i64 atomic add with a runtime element index plus constant offset:
; %index is scaled by 8 (s_lshl_b64 ... 3), added to %out with a 64-bit
; carry chain, then the extra +32 bytes (gep i64 4) are folded in.
90 define amdgpu_kernel void @atomic_add_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
91 ; GCN1-LABEL: atomic_add_i64_addr64_offset:
92 ; GCN1: ; %bb.0: ; %entry
93 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
94 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
95 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
96 ; GCN1-NEXT: v_mov_b32_e32 v0, s6
97 ; GCN1-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
98 ; GCN1-NEXT: s_add_u32 s0, s4, s0
99 ; GCN1-NEXT: s_addc_u32 s1, s5, s1
100 ; GCN1-NEXT: s_add_u32 s0, s0, 32
101 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
102 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
103 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
104 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
105 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
106 ; GCN1-NEXT: flat_atomic_add_x2 v[2:3], v[0:1]
107 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
108 ; GCN1-NEXT: buffer_wbinvl1_vol
109 ; GCN1-NEXT: s_endpgm
111 ; GCN2-LABEL: atomic_add_i64_addr64_offset:
112 ; GCN2: ; %bb.0: ; %entry
113 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
114 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
115 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
116 ; GCN2-NEXT: v_mov_b32_e32 v0, s6
117 ; GCN2-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
118 ; GCN2-NEXT: s_add_u32 s0, s4, s0
119 ; GCN2-NEXT: s_addc_u32 s1, s5, s1
120 ; GCN2-NEXT: s_add_u32 s0, s0, 32
121 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
122 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
123 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
124 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
125 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
126 ; GCN2-NEXT: flat_atomic_add_x2 v[2:3], v[0:1]
127 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
128 ; GCN2-NEXT: buffer_wbinvl1_vol
129 ; GCN2-NEXT: s_endpgm
131 %ptr = getelementptr i64, ptr %out, i64 %index
132 %gep = getelementptr i64, ptr %ptr, i64 4
133 %tmp0 = atomicrmw volatile add ptr %gep, i64 %in syncscope("agent") seq_cst
; Indexed + offset i64 atomic add that also uses the old value: all six
; kernargs come in via one s_load_dwordx8, the atomic carries glc, and the
; result is stored to %out2.
137 define amdgpu_kernel void @atomic_add_i64_ret_addr64_offset(ptr %out, ptr %out2, i64 %in, i64 %index) {
138 ; GCN1-LABEL: atomic_add_i64_ret_addr64_offset:
139 ; GCN1: ; %bb.0: ; %entry
140 ; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
141 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
142 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
143 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
144 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
145 ; GCN1-NEXT: s_add_u32 s0, s0, s4
146 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
147 ; GCN1-NEXT: s_add_u32 s0, s0, 32
148 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
149 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
150 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
151 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
152 ; GCN1-NEXT: flat_atomic_add_x2 v[0:1], v[2:3], v[0:1] glc
153 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
154 ; GCN1-NEXT: buffer_wbinvl1_vol
155 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
156 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
157 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
158 ; GCN1-NEXT: s_endpgm
160 ; GCN2-LABEL: atomic_add_i64_ret_addr64_offset:
161 ; GCN2: ; %bb.0: ; %entry
162 ; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
163 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
164 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
165 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
166 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
167 ; GCN2-NEXT: s_add_u32 s0, s0, s4
168 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
169 ; GCN2-NEXT: s_add_u32 s0, s0, 32
170 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
171 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
172 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
173 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
174 ; GCN2-NEXT: flat_atomic_add_x2 v[0:1], v[2:3], v[0:1] glc
175 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
176 ; GCN2-NEXT: buffer_wbinvl1_vol
177 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
178 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
179 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
180 ; GCN2-NEXT: s_endpgm
182 %ptr = getelementptr i64, ptr %out, i64 %index
183 %gep = getelementptr i64, ptr %ptr, i64 4
184 %tmp0 = atomicrmw volatile add ptr %gep, i64 %in syncscope("agent") seq_cst
185 store i64 %tmp0, ptr %out2
; Baseline case: i64 atomic add directly on %out, no offset and no index —
; no scalar address arithmetic is expected before the flat_atomic_add_x2.
189 define amdgpu_kernel void @atomic_add_i64(ptr %out, i64 %in) {
190 ; GCN1-LABEL: atomic_add_i64:
191 ; GCN1: ; %bb.0: ; %entry
192 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
193 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
194 ; GCN1-NEXT: v_mov_b32_e32 v0, s0
195 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
196 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
197 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
198 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
199 ; GCN1-NEXT: flat_atomic_add_x2 v[0:1], v[2:3]
200 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
201 ; GCN1-NEXT: buffer_wbinvl1_vol
202 ; GCN1-NEXT: s_endpgm
204 ; GCN2-LABEL: atomic_add_i64:
205 ; GCN2: ; %bb.0: ; %entry
206 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
207 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
208 ; GCN2-NEXT: v_mov_b32_e32 v0, s0
209 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
210 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
211 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
212 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
213 ; GCN2-NEXT: flat_atomic_add_x2 v[0:1], v[2:3]
214 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
215 ; GCN2-NEXT: buffer_wbinvl1_vol
216 ; GCN2-NEXT: s_endpgm
218 %tmp0 = atomicrmw volatile add ptr %out, i64 %in syncscope("agent") seq_cst
; Plain i64 atomic add whose old value is consumed: glc form of
; flat_atomic_add_x2, result stored to %out2 with flat_store_dwordx2.
222 define amdgpu_kernel void @atomic_add_i64_ret(ptr %out, ptr %out2, i64 %in) {
223 ; GCN1-LABEL: atomic_add_i64_ret:
224 ; GCN1: ; %bb.0: ; %entry
225 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
226 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
227 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
228 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
229 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
230 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
231 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
232 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
233 ; GCN1-NEXT: flat_atomic_add_x2 v[0:1], v[0:1], v[2:3] glc
234 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
235 ; GCN1-NEXT: buffer_wbinvl1_vol
236 ; GCN1-NEXT: v_mov_b32_e32 v2, s6
237 ; GCN1-NEXT: v_mov_b32_e32 v3, s7
238 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
239 ; GCN1-NEXT: s_endpgm
241 ; GCN2-LABEL: atomic_add_i64_ret:
242 ; GCN2: ; %bb.0: ; %entry
243 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
244 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
245 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
246 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
247 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
248 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
249 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
250 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
251 ; GCN2-NEXT: flat_atomic_add_x2 v[0:1], v[0:1], v[2:3] glc
252 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
253 ; GCN2-NEXT: buffer_wbinvl1_vol
254 ; GCN2-NEXT: v_mov_b32_e32 v2, s6
255 ; GCN2-NEXT: v_mov_b32_e32 v3, s7
256 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
257 ; GCN2-NEXT: s_endpgm
259 %tmp0 = atomicrmw volatile add ptr %out, i64 %in syncscope("agent") seq_cst
260 store i64 %tmp0, ptr %out2
; i64 atomic add at a runtime element index only (no constant offset):
; %index scaled by 8 via s_lshl_b64 and added to %out with carry.
264 define amdgpu_kernel void @atomic_add_i64_addr64(ptr %out, i64 %in, i64 %index) {
265 ; GCN1-LABEL: atomic_add_i64_addr64:
266 ; GCN1: ; %bb.0: ; %entry
267 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
268 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
269 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
270 ; GCN1-NEXT: v_mov_b32_e32 v0, s6
271 ; GCN1-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
272 ; GCN1-NEXT: s_add_u32 s0, s4, s0
273 ; GCN1-NEXT: s_addc_u32 s1, s5, s1
274 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
275 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
276 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
277 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
278 ; GCN1-NEXT: flat_atomic_add_x2 v[2:3], v[0:1]
279 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
280 ; GCN1-NEXT: buffer_wbinvl1_vol
281 ; GCN1-NEXT: s_endpgm
283 ; GCN2-LABEL: atomic_add_i64_addr64:
284 ; GCN2: ; %bb.0: ; %entry
285 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
286 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
287 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
288 ; GCN2-NEXT: v_mov_b32_e32 v0, s6
289 ; GCN2-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
290 ; GCN2-NEXT: s_add_u32 s0, s4, s0
291 ; GCN2-NEXT: s_addc_u32 s1, s5, s1
292 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
293 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
294 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
295 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
296 ; GCN2-NEXT: flat_atomic_add_x2 v[2:3], v[0:1]
297 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
298 ; GCN2-NEXT: buffer_wbinvl1_vol
299 ; GCN2-NEXT: s_endpgm
301 %ptr = getelementptr i64, ptr %out, i64 %index
302 %tmp0 = atomicrmw volatile add ptr %ptr, i64 %in syncscope("agent") seq_cst
; Indexed i64 atomic add with used result: glc atomic, then the old value
; is stored to %out2.
306 define amdgpu_kernel void @atomic_add_i64_ret_addr64(ptr %out, ptr %out2, i64 %in, i64 %index) {
307 ; GCN1-LABEL: atomic_add_i64_ret_addr64:
308 ; GCN1: ; %bb.0: ; %entry
309 ; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
310 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
311 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
312 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
313 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
314 ; GCN1-NEXT: s_add_u32 s0, s0, s4
315 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
316 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
317 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
318 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
319 ; GCN1-NEXT: flat_atomic_add_x2 v[0:1], v[2:3], v[0:1] glc
320 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
321 ; GCN1-NEXT: buffer_wbinvl1_vol
322 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
323 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
324 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
325 ; GCN1-NEXT: s_endpgm
327 ; GCN2-LABEL: atomic_add_i64_ret_addr64:
328 ; GCN2: ; %bb.0: ; %entry
329 ; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
330 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
331 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
332 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
333 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
334 ; GCN2-NEXT: s_add_u32 s0, s0, s4
335 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
336 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
337 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
338 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
339 ; GCN2-NEXT: flat_atomic_add_x2 v[0:1], v[2:3], v[0:1] glc
340 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
341 ; GCN2-NEXT: buffer_wbinvl1_vol
342 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
343 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
344 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
345 ; GCN2-NEXT: s_endpgm
347 %ptr = getelementptr i64, ptr %out, i64 %index
348 %tmp0 = atomicrmw volatile add ptr %ptr, i64 %in syncscope("agent") seq_cst
349 store i64 %tmp0, ptr %out2
; Same shape as atomic_add_i64_offset but for atomicrmw 'and':
; expects flat_atomic_and_x2 at %out + 32 bytes.
353 define amdgpu_kernel void @atomic_and_i64_offset(ptr %out, i64 %in) {
354 ; GCN1-LABEL: atomic_and_i64_offset:
355 ; GCN1: ; %bb.0: ; %entry
356 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
357 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
358 ; GCN1-NEXT: s_add_u32 s0, s0, 32
359 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
360 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
361 ; GCN1-NEXT: v_mov_b32_e32 v0, s2
362 ; GCN1-NEXT: v_mov_b32_e32 v1, s3
363 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
364 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
365 ; GCN1-NEXT: flat_atomic_and_x2 v[2:3], v[0:1]
366 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
367 ; GCN1-NEXT: buffer_wbinvl1_vol
368 ; GCN1-NEXT: s_endpgm
370 ; GCN2-LABEL: atomic_and_i64_offset:
371 ; GCN2: ; %bb.0: ; %entry
372 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
373 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
374 ; GCN2-NEXT: s_add_u32 s0, s0, 32
375 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
376 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
377 ; GCN2-NEXT: v_mov_b32_e32 v0, s2
378 ; GCN2-NEXT: v_mov_b32_e32 v1, s3
379 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
380 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
381 ; GCN2-NEXT: flat_atomic_and_x2 v[2:3], v[0:1]
382 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
383 ; GCN2-NEXT: buffer_wbinvl1_vol
384 ; GCN2-NEXT: s_endpgm
386 %gep = getelementptr i64, ptr %out, i64 4
387 %tmp0 = atomicrmw volatile and ptr %gep, i64 %in syncscope("agent") seq_cst
; i64 atomic and at +32 bytes with the old value kept: glc form of
; flat_atomic_and_x2, result stored to %out2.
391 define amdgpu_kernel void @atomic_and_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
392 ; GCN1-LABEL: atomic_and_i64_ret_offset:
393 ; GCN1: ; %bb.0: ; %entry
394 ; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
395 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
396 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
397 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
398 ; GCN1-NEXT: s_add_u32 s0, s0, 32
399 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
400 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
401 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
402 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
403 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
404 ; GCN1-NEXT: flat_atomic_and_x2 v[0:1], v[2:3], v[0:1] glc
405 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
406 ; GCN1-NEXT: buffer_wbinvl1_vol
407 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
408 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
409 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
410 ; GCN1-NEXT: s_endpgm
412 ; GCN2-LABEL: atomic_and_i64_ret_offset:
413 ; GCN2: ; %bb.0: ; %entry
414 ; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
415 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
416 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
417 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
418 ; GCN2-NEXT: s_add_u32 s0, s0, 32
419 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
420 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
421 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
422 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
423 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
424 ; GCN2-NEXT: flat_atomic_and_x2 v[0:1], v[2:3], v[0:1] glc
425 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
426 ; GCN2-NEXT: buffer_wbinvl1_vol
427 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
428 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
429 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
430 ; GCN2-NEXT: s_endpgm
432 %gep = getelementptr i64, ptr %out, i64 4
433 %tmp0 = atomicrmw volatile and ptr %gep, i64 %in syncscope("agent") seq_cst
434 store i64 %tmp0, ptr %out2
; i64 atomic and at a runtime index (scaled by 8) plus +32 bytes.
438 define amdgpu_kernel void @atomic_and_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
439 ; GCN1-LABEL: atomic_and_i64_addr64_offset:
440 ; GCN1: ; %bb.0: ; %entry
441 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
442 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
443 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
444 ; GCN1-NEXT: v_mov_b32_e32 v0, s6
445 ; GCN1-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
446 ; GCN1-NEXT: s_add_u32 s0, s4, s0
447 ; GCN1-NEXT: s_addc_u32 s1, s5, s1
448 ; GCN1-NEXT: s_add_u32 s0, s0, 32
449 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
450 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
451 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
452 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
453 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
454 ; GCN1-NEXT: flat_atomic_and_x2 v[2:3], v[0:1]
455 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
456 ; GCN1-NEXT: buffer_wbinvl1_vol
457 ; GCN1-NEXT: s_endpgm
459 ; GCN2-LABEL: atomic_and_i64_addr64_offset:
460 ; GCN2: ; %bb.0: ; %entry
461 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
462 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
463 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
464 ; GCN2-NEXT: v_mov_b32_e32 v0, s6
465 ; GCN2-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
466 ; GCN2-NEXT: s_add_u32 s0, s4, s0
467 ; GCN2-NEXT: s_addc_u32 s1, s5, s1
468 ; GCN2-NEXT: s_add_u32 s0, s0, 32
469 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
470 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
471 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
472 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
473 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
474 ; GCN2-NEXT: flat_atomic_and_x2 v[2:3], v[0:1]
475 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
476 ; GCN2-NEXT: buffer_wbinvl1_vol
477 ; GCN2-NEXT: s_endpgm
479 %ptr = getelementptr i64, ptr %out, i64 %index
480 %gep = getelementptr i64, ptr %ptr, i64 4
481 %tmp0 = atomicrmw volatile and ptr %gep, i64 %in syncscope("agent") seq_cst
; Indexed + offset i64 atomic and with used result (glc; stored to %out2).
485 define amdgpu_kernel void @atomic_and_i64_ret_addr64_offset(ptr %out, ptr %out2, i64 %in, i64 %index) {
486 ; GCN1-LABEL: atomic_and_i64_ret_addr64_offset:
487 ; GCN1: ; %bb.0: ; %entry
488 ; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
489 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
490 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
491 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
492 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
493 ; GCN1-NEXT: s_add_u32 s0, s0, s4
494 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
495 ; GCN1-NEXT: s_add_u32 s0, s0, 32
496 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
497 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
498 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
499 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
500 ; GCN1-NEXT: flat_atomic_and_x2 v[0:1], v[2:3], v[0:1] glc
501 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
502 ; GCN1-NEXT: buffer_wbinvl1_vol
503 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
504 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
505 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
506 ; GCN1-NEXT: s_endpgm
508 ; GCN2-LABEL: atomic_and_i64_ret_addr64_offset:
509 ; GCN2: ; %bb.0: ; %entry
510 ; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
511 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
512 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
513 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
514 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
515 ; GCN2-NEXT: s_add_u32 s0, s0, s4
516 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
517 ; GCN2-NEXT: s_add_u32 s0, s0, 32
518 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
519 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
520 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
521 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
522 ; GCN2-NEXT: flat_atomic_and_x2 v[0:1], v[2:3], v[0:1] glc
523 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
524 ; GCN2-NEXT: buffer_wbinvl1_vol
525 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
526 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
527 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
528 ; GCN2-NEXT: s_endpgm
530 %ptr = getelementptr i64, ptr %out, i64 %index
531 %gep = getelementptr i64, ptr %ptr, i64 4
532 %tmp0 = atomicrmw volatile and ptr %gep, i64 %in syncscope("agent") seq_cst
533 store i64 %tmp0, ptr %out2
; Baseline i64 atomic and directly on %out (no offset, no index).
537 define amdgpu_kernel void @atomic_and_i64(ptr %out, i64 %in) {
538 ; GCN1-LABEL: atomic_and_i64:
539 ; GCN1: ; %bb.0: ; %entry
540 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
541 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
542 ; GCN1-NEXT: v_mov_b32_e32 v0, s0
543 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
544 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
545 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
546 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
547 ; GCN1-NEXT: flat_atomic_and_x2 v[0:1], v[2:3]
548 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
549 ; GCN1-NEXT: buffer_wbinvl1_vol
550 ; GCN1-NEXT: s_endpgm
552 ; GCN2-LABEL: atomic_and_i64:
553 ; GCN2: ; %bb.0: ; %entry
554 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
555 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
556 ; GCN2-NEXT: v_mov_b32_e32 v0, s0
557 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
558 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
559 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
560 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
561 ; GCN2-NEXT: flat_atomic_and_x2 v[0:1], v[2:3]
562 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
563 ; GCN2-NEXT: buffer_wbinvl1_vol
564 ; GCN2-NEXT: s_endpgm
566 %tmp0 = atomicrmw volatile and ptr %out, i64 %in syncscope("agent") seq_cst
; Plain i64 atomic and whose old value is consumed (glc; stored to %out2).
570 define amdgpu_kernel void @atomic_and_i64_ret(ptr %out, ptr %out2, i64 %in) {
571 ; GCN1-LABEL: atomic_and_i64_ret:
572 ; GCN1: ; %bb.0: ; %entry
573 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
574 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
575 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
576 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
577 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
578 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
579 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
580 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
581 ; GCN1-NEXT: flat_atomic_and_x2 v[0:1], v[0:1], v[2:3] glc
582 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
583 ; GCN1-NEXT: buffer_wbinvl1_vol
584 ; GCN1-NEXT: v_mov_b32_e32 v2, s6
585 ; GCN1-NEXT: v_mov_b32_e32 v3, s7
586 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
587 ; GCN1-NEXT: s_endpgm
589 ; GCN2-LABEL: atomic_and_i64_ret:
590 ; GCN2: ; %bb.0: ; %entry
591 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
592 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
593 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
594 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
595 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
596 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
597 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
598 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
599 ; GCN2-NEXT: flat_atomic_and_x2 v[0:1], v[0:1], v[2:3] glc
600 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
601 ; GCN2-NEXT: buffer_wbinvl1_vol
602 ; GCN2-NEXT: v_mov_b32_e32 v2, s6
603 ; GCN2-NEXT: v_mov_b32_e32 v3, s7
604 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
605 ; GCN2-NEXT: s_endpgm
607 %tmp0 = atomicrmw volatile and ptr %out, i64 %in syncscope("agent") seq_cst
608 store i64 %tmp0, ptr %out2
; i64 atomic and at a runtime element index only (index scaled by 8).
612 define amdgpu_kernel void @atomic_and_i64_addr64(ptr %out, i64 %in, i64 %index) {
613 ; GCN1-LABEL: atomic_and_i64_addr64:
614 ; GCN1: ; %bb.0: ; %entry
615 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
616 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
617 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
618 ; GCN1-NEXT: v_mov_b32_e32 v0, s6
619 ; GCN1-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
620 ; GCN1-NEXT: s_add_u32 s0, s4, s0
621 ; GCN1-NEXT: s_addc_u32 s1, s5, s1
622 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
623 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
624 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
625 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
626 ; GCN1-NEXT: flat_atomic_and_x2 v[2:3], v[0:1]
627 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
628 ; GCN1-NEXT: buffer_wbinvl1_vol
629 ; GCN1-NEXT: s_endpgm
631 ; GCN2-LABEL: atomic_and_i64_addr64:
632 ; GCN2: ; %bb.0: ; %entry
633 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
634 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
635 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
636 ; GCN2-NEXT: v_mov_b32_e32 v0, s6
637 ; GCN2-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
638 ; GCN2-NEXT: s_add_u32 s0, s4, s0
639 ; GCN2-NEXT: s_addc_u32 s1, s5, s1
640 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
641 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
642 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
643 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
644 ; GCN2-NEXT: flat_atomic_and_x2 v[2:3], v[0:1]
645 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
646 ; GCN2-NEXT: buffer_wbinvl1_vol
647 ; GCN2-NEXT: s_endpgm
649 %ptr = getelementptr i64, ptr %out, i64 %index
650 %tmp0 = atomicrmw volatile and ptr %ptr, i64 %in syncscope("agent") seq_cst
; Indexed i64 atomic and with used result (glc; old value stored to %out2).
654 define amdgpu_kernel void @atomic_and_i64_ret_addr64(ptr %out, ptr %out2, i64 %in, i64 %index) {
655 ; GCN1-LABEL: atomic_and_i64_ret_addr64:
656 ; GCN1: ; %bb.0: ; %entry
657 ; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
658 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
659 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
660 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
661 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
662 ; GCN1-NEXT: s_add_u32 s0, s0, s4
663 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
664 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
665 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
666 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
667 ; GCN1-NEXT: flat_atomic_and_x2 v[0:1], v[2:3], v[0:1] glc
668 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
669 ; GCN1-NEXT: buffer_wbinvl1_vol
670 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
671 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
672 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
673 ; GCN1-NEXT: s_endpgm
675 ; GCN2-LABEL: atomic_and_i64_ret_addr64:
676 ; GCN2: ; %bb.0: ; %entry
677 ; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
678 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
679 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
680 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
681 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
682 ; GCN2-NEXT: s_add_u32 s0, s0, s4
683 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
684 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
685 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
686 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
687 ; GCN2-NEXT: flat_atomic_and_x2 v[0:1], v[2:3], v[0:1] glc
688 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
689 ; GCN2-NEXT: buffer_wbinvl1_vol
690 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
691 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
692 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
693 ; GCN2-NEXT: s_endpgm
695 %ptr = getelementptr i64, ptr %out, i64 %index
696 %tmp0 = atomicrmw volatile and ptr %ptr, i64 %in syncscope("agent") seq_cst
697 store i64 %tmp0, ptr %out2
; i64 atomic sub at a constant +32-byte offset (gep i64 4):
; expects flat_atomic_sub_x2 with the same fence/invalidate pattern.
701 define amdgpu_kernel void @atomic_sub_i64_offset(ptr %out, i64 %in) {
702 ; GCN1-LABEL: atomic_sub_i64_offset:
703 ; GCN1: ; %bb.0: ; %entry
704 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
705 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
706 ; GCN1-NEXT: s_add_u32 s0, s0, 32
707 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
708 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
709 ; GCN1-NEXT: v_mov_b32_e32 v0, s2
710 ; GCN1-NEXT: v_mov_b32_e32 v1, s3
711 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
712 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
713 ; GCN1-NEXT: flat_atomic_sub_x2 v[2:3], v[0:1]
714 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
715 ; GCN1-NEXT: buffer_wbinvl1_vol
716 ; GCN1-NEXT: s_endpgm
718 ; GCN2-LABEL: atomic_sub_i64_offset:
719 ; GCN2: ; %bb.0: ; %entry
720 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
721 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
722 ; GCN2-NEXT: s_add_u32 s0, s0, 32
723 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
724 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
725 ; GCN2-NEXT: v_mov_b32_e32 v0, s2
726 ; GCN2-NEXT: v_mov_b32_e32 v1, s3
727 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
728 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
729 ; GCN2-NEXT: flat_atomic_sub_x2 v[2:3], v[0:1]
730 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
731 ; GCN2-NEXT: buffer_wbinvl1_vol
732 ; GCN2-NEXT: s_endpgm
734 %gep = getelementptr i64, ptr %out, i64 4
735 %tmp0 = atomicrmw volatile sub ptr %gep, i64 %in syncscope("agent") seq_cst
; i64 atomic sub at +32 bytes with the old value kept (glc; stored to %out2).
739 define amdgpu_kernel void @atomic_sub_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
740 ; GCN1-LABEL: atomic_sub_i64_ret_offset:
741 ; GCN1: ; %bb.0: ; %entry
742 ; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
743 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
744 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
745 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
746 ; GCN1-NEXT: s_add_u32 s0, s0, 32
747 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
748 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
749 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
750 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
751 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
752 ; GCN1-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3], v[0:1] glc
753 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
754 ; GCN1-NEXT: buffer_wbinvl1_vol
755 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
756 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
757 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
758 ; GCN1-NEXT: s_endpgm
760 ; GCN2-LABEL: atomic_sub_i64_ret_offset:
761 ; GCN2: ; %bb.0: ; %entry
762 ; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
763 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
764 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
765 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
766 ; GCN2-NEXT: s_add_u32 s0, s0, 32
767 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
768 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
769 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
770 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
771 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
772 ; GCN2-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3], v[0:1] glc
773 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
774 ; GCN2-NEXT: buffer_wbinvl1_vol
775 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
776 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
777 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
778 ; GCN2-NEXT: s_endpgm
780 %gep = getelementptr i64, ptr %out, i64 4
781 %tmp0 = atomicrmw volatile sub ptr %gep, i64 %in syncscope("agent") seq_cst
782 store i64 %tmp0, ptr %out2
; i64 atomic sub at a runtime index (scaled by 8) plus +32 bytes.
786 define amdgpu_kernel void @atomic_sub_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
787 ; GCN1-LABEL: atomic_sub_i64_addr64_offset:
788 ; GCN1: ; %bb.0: ; %entry
789 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
790 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
791 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
792 ; GCN1-NEXT: v_mov_b32_e32 v0, s6
793 ; GCN1-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
794 ; GCN1-NEXT: s_add_u32 s0, s4, s0
795 ; GCN1-NEXT: s_addc_u32 s1, s5, s1
796 ; GCN1-NEXT: s_add_u32 s0, s0, 32
797 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
798 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
799 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
800 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
801 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
802 ; GCN1-NEXT: flat_atomic_sub_x2 v[2:3], v[0:1]
803 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
804 ; GCN1-NEXT: buffer_wbinvl1_vol
805 ; GCN1-NEXT: s_endpgm
807 ; GCN2-LABEL: atomic_sub_i64_addr64_offset:
808 ; GCN2: ; %bb.0: ; %entry
809 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
810 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
811 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
812 ; GCN2-NEXT: v_mov_b32_e32 v0, s6
813 ; GCN2-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
814 ; GCN2-NEXT: s_add_u32 s0, s4, s0
815 ; GCN2-NEXT: s_addc_u32 s1, s5, s1
816 ; GCN2-NEXT: s_add_u32 s0, s0, 32
817 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
818 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
819 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
820 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
821 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
822 ; GCN2-NEXT: flat_atomic_sub_x2 v[2:3], v[0:1]
823 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
824 ; GCN2-NEXT: buffer_wbinvl1_vol
825 ; GCN2-NEXT: s_endpgm
827 %ptr = getelementptr i64, ptr %out, i64 %index
828 %gep = getelementptr i64, ptr %ptr, i64 4
829 %tmp0 = atomicrmw volatile sub ptr %gep, i64 %in syncscope("agent") seq_cst
; Indexed + offset i64 atomic sub with used result (glc; stored to %out2).
833 define amdgpu_kernel void @atomic_sub_i64_ret_addr64_offset(ptr %out, ptr %out2, i64 %in, i64 %index) {
834 ; GCN1-LABEL: atomic_sub_i64_ret_addr64_offset:
835 ; GCN1: ; %bb.0: ; %entry
836 ; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
837 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
838 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
839 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
840 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
841 ; GCN1-NEXT: s_add_u32 s0, s0, s4
842 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
843 ; GCN1-NEXT: s_add_u32 s0, s0, 32
844 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
845 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
846 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
847 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
848 ; GCN1-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3], v[0:1] glc
849 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
850 ; GCN1-NEXT: buffer_wbinvl1_vol
851 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
852 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
853 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
854 ; GCN1-NEXT: s_endpgm
856 ; GCN2-LABEL: atomic_sub_i64_ret_addr64_offset:
857 ; GCN2: ; %bb.0: ; %entry
858 ; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
859 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
860 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
861 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
862 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
863 ; GCN2-NEXT: s_add_u32 s0, s0, s4
864 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
865 ; GCN2-NEXT: s_add_u32 s0, s0, 32
866 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
867 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
868 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
869 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
870 ; GCN2-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3], v[0:1] glc
871 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
872 ; GCN2-NEXT: buffer_wbinvl1_vol
873 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
874 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
875 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
876 ; GCN2-NEXT: s_endpgm
878 %ptr = getelementptr i64, ptr %out, i64 %index
879 %gep = getelementptr i64, ptr %ptr, i64 4
880 %tmp0 = atomicrmw volatile sub ptr %gep, i64 %in syncscope("agent") seq_cst
881 store i64 %tmp0, ptr %out2
885 define amdgpu_kernel void @atomic_sub_i64(ptr %out, i64 %in) {
886 ; GCN1-LABEL: atomic_sub_i64:
887 ; GCN1: ; %bb.0: ; %entry
888 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
889 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
890 ; GCN1-NEXT: v_mov_b32_e32 v0, s0
891 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
892 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
893 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
894 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
895 ; GCN1-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3]
896 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
897 ; GCN1-NEXT: buffer_wbinvl1_vol
898 ; GCN1-NEXT: s_endpgm
900 ; GCN2-LABEL: atomic_sub_i64:
901 ; GCN2: ; %bb.0: ; %entry
902 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
903 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
904 ; GCN2-NEXT: v_mov_b32_e32 v0, s0
905 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
906 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
907 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
908 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
909 ; GCN2-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3]
910 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
911 ; GCN2-NEXT: buffer_wbinvl1_vol
912 ; GCN2-NEXT: s_endpgm
914 %tmp0 = atomicrmw volatile sub ptr %out, i64 %in syncscope("agent") seq_cst
918 define amdgpu_kernel void @atomic_sub_i64_ret(ptr %out, ptr %out2, i64 %in) {
919 ; GCN1-LABEL: atomic_sub_i64_ret:
920 ; GCN1: ; %bb.0: ; %entry
921 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
922 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
923 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
924 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
925 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
926 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
927 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
928 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
929 ; GCN1-NEXT: flat_atomic_sub_x2 v[0:1], v[0:1], v[2:3] glc
930 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
931 ; GCN1-NEXT: buffer_wbinvl1_vol
932 ; GCN1-NEXT: v_mov_b32_e32 v2, s6
933 ; GCN1-NEXT: v_mov_b32_e32 v3, s7
934 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
935 ; GCN1-NEXT: s_endpgm
937 ; GCN2-LABEL: atomic_sub_i64_ret:
938 ; GCN2: ; %bb.0: ; %entry
939 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
940 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
941 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
942 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
943 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
944 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
945 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
946 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
947 ; GCN2-NEXT: flat_atomic_sub_x2 v[0:1], v[0:1], v[2:3] glc
948 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
949 ; GCN2-NEXT: buffer_wbinvl1_vol
950 ; GCN2-NEXT: v_mov_b32_e32 v2, s6
951 ; GCN2-NEXT: v_mov_b32_e32 v3, s7
952 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
953 ; GCN2-NEXT: s_endpgm
955 %tmp0 = atomicrmw volatile sub ptr %out, i64 %in syncscope("agent") seq_cst
956 store i64 %tmp0, ptr %out2
960 define amdgpu_kernel void @atomic_sub_i64_addr64(ptr %out, i64 %in, i64 %index) {
961 ; GCN1-LABEL: atomic_sub_i64_addr64:
962 ; GCN1: ; %bb.0: ; %entry
963 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
964 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
965 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
966 ; GCN1-NEXT: v_mov_b32_e32 v0, s6
967 ; GCN1-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
968 ; GCN1-NEXT: s_add_u32 s0, s4, s0
969 ; GCN1-NEXT: s_addc_u32 s1, s5, s1
970 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
971 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
972 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
973 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
974 ; GCN1-NEXT: flat_atomic_sub_x2 v[2:3], v[0:1]
975 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
976 ; GCN1-NEXT: buffer_wbinvl1_vol
977 ; GCN1-NEXT: s_endpgm
979 ; GCN2-LABEL: atomic_sub_i64_addr64:
980 ; GCN2: ; %bb.0: ; %entry
981 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
982 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
983 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
984 ; GCN2-NEXT: v_mov_b32_e32 v0, s6
985 ; GCN2-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
986 ; GCN2-NEXT: s_add_u32 s0, s4, s0
987 ; GCN2-NEXT: s_addc_u32 s1, s5, s1
988 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
989 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
990 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
991 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
992 ; GCN2-NEXT: flat_atomic_sub_x2 v[2:3], v[0:1]
993 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
994 ; GCN2-NEXT: buffer_wbinvl1_vol
995 ; GCN2-NEXT: s_endpgm
997 %ptr = getelementptr i64, ptr %out, i64 %index
998 %tmp0 = atomicrmw volatile sub ptr %ptr, i64 %in syncscope("agent") seq_cst
1002 define amdgpu_kernel void @atomic_sub_i64_ret_addr64(ptr %out, ptr %out2, i64 %in, i64 %index) {
1003 ; GCN1-LABEL: atomic_sub_i64_ret_addr64:
1004 ; GCN1: ; %bb.0: ; %entry
1005 ; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
1006 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1007 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
1008 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
1009 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
1010 ; GCN1-NEXT: s_add_u32 s0, s0, s4
1011 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
1012 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
1013 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
1014 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
1015 ; GCN1-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3], v[0:1] glc
1016 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
1017 ; GCN1-NEXT: buffer_wbinvl1_vol
1018 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
1019 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
1020 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
1021 ; GCN1-NEXT: s_endpgm
1023 ; GCN2-LABEL: atomic_sub_i64_ret_addr64:
1024 ; GCN2: ; %bb.0: ; %entry
1025 ; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
1026 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1027 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
1028 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
1029 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
1030 ; GCN2-NEXT: s_add_u32 s0, s0, s4
1031 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
1032 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
1033 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
1034 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
1035 ; GCN2-NEXT: flat_atomic_sub_x2 v[0:1], v[2:3], v[0:1] glc
1036 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
1037 ; GCN2-NEXT: buffer_wbinvl1_vol
1038 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
1039 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
1040 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
1041 ; GCN2-NEXT: s_endpgm
1043 %ptr = getelementptr i64, ptr %out, i64 %index
1044 %tmp0 = atomicrmw volatile sub ptr %ptr, i64 %in syncscope("agent") seq_cst
1045 store i64 %tmp0, ptr %out2
1049 define amdgpu_kernel void @atomic_max_i64_offset(ptr %out, i64 %in) {
1050 ; GCN1-LABEL: atomic_max_i64_offset:
1051 ; GCN1: ; %bb.0: ; %entry
1052 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
1053 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1054 ; GCN1-NEXT: s_add_u32 s0, s0, 32
1055 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
1056 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
1057 ; GCN1-NEXT: v_mov_b32_e32 v0, s2
1058 ; GCN1-NEXT: v_mov_b32_e32 v1, s3
1059 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
1060 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1061 ; GCN1-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1]
1062 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1063 ; GCN1-NEXT: s_endpgm
1065 ; GCN2-LABEL: atomic_max_i64_offset:
1066 ; GCN2: ; %bb.0: ; %entry
1067 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
1068 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1069 ; GCN2-NEXT: s_add_u32 s0, s0, 32
1070 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
1071 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
1072 ; GCN2-NEXT: v_mov_b32_e32 v0, s2
1073 ; GCN2-NEXT: v_mov_b32_e32 v1, s3
1074 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
1075 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1076 ; GCN2-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1]
1077 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1078 ; GCN2-NEXT: s_endpgm
1080 %gep = getelementptr i64, ptr %out, i64 4
1081 %tmp0 = atomicrmw volatile max ptr %gep, i64 %in syncscope("workgroup") seq_cst
1085 define amdgpu_kernel void @atomic_max_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
1086 ; GCN1-LABEL: atomic_max_i64_ret_offset:
1087 ; GCN1: ; %bb.0: ; %entry
1088 ; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
1089 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
1090 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1091 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
1092 ; GCN1-NEXT: s_add_u32 s0, s0, 32
1093 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
1094 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
1095 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
1096 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
1097 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1098 ; GCN1-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc
1099 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1100 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
1101 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
1102 ; GCN1-NEXT: s_waitcnt vmcnt(0)
1103 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
1104 ; GCN1-NEXT: s_endpgm
1106 ; GCN2-LABEL: atomic_max_i64_ret_offset:
1107 ; GCN2: ; %bb.0: ; %entry
1108 ; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
1109 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
1110 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1111 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
1112 ; GCN2-NEXT: s_add_u32 s0, s0, 32
1113 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
1114 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
1115 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
1116 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
1117 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1118 ; GCN2-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc
1119 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1120 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
1121 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
1122 ; GCN2-NEXT: s_waitcnt vmcnt(0)
1123 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
1124 ; GCN2-NEXT: s_endpgm
1126 %gep = getelementptr i64, ptr %out, i64 4
1127 %tmp0 = atomicrmw volatile max ptr %gep, i64 %in syncscope("workgroup") seq_cst
1128 store i64 %tmp0, ptr %out2
1132 define amdgpu_kernel void @atomic_max_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
1133 ; GCN1-LABEL: atomic_max_i64_addr64_offset:
1134 ; GCN1: ; %bb.0: ; %entry
1135 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
1136 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
1137 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1138 ; GCN1-NEXT: v_mov_b32_e32 v0, s6
1139 ; GCN1-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
1140 ; GCN1-NEXT: s_add_u32 s0, s4, s0
1141 ; GCN1-NEXT: s_addc_u32 s1, s5, s1
1142 ; GCN1-NEXT: s_add_u32 s0, s0, 32
1143 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
1144 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
1145 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
1146 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
1147 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1148 ; GCN1-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1]
1149 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1150 ; GCN1-NEXT: s_endpgm
1152 ; GCN2-LABEL: atomic_max_i64_addr64_offset:
1153 ; GCN2: ; %bb.0: ; %entry
1154 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
1155 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
1156 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1157 ; GCN2-NEXT: v_mov_b32_e32 v0, s6
1158 ; GCN2-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
1159 ; GCN2-NEXT: s_add_u32 s0, s4, s0
1160 ; GCN2-NEXT: s_addc_u32 s1, s5, s1
1161 ; GCN2-NEXT: s_add_u32 s0, s0, 32
1162 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
1163 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
1164 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
1165 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
1166 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1167 ; GCN2-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1]
1168 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1169 ; GCN2-NEXT: s_endpgm
1171 %ptr = getelementptr i64, ptr %out, i64 %index
1172 %gep = getelementptr i64, ptr %ptr, i64 4
1173 %tmp0 = atomicrmw volatile max ptr %gep, i64 %in syncscope("workgroup") seq_cst
1177 define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr %out, ptr %out2, i64 %in, i64 %index) {
1178 ; GCN1-LABEL: atomic_max_i64_ret_addr64_offset:
1179 ; GCN1: ; %bb.0: ; %entry
1180 ; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
1181 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1182 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
1183 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
1184 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
1185 ; GCN1-NEXT: s_add_u32 s0, s0, s4
1186 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
1187 ; GCN1-NEXT: s_add_u32 s0, s0, 32
1188 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
1189 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
1190 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
1191 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1192 ; GCN1-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc
1193 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1194 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
1195 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
1196 ; GCN1-NEXT: s_waitcnt vmcnt(0)
1197 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
1198 ; GCN1-NEXT: s_endpgm
1200 ; GCN2-LABEL: atomic_max_i64_ret_addr64_offset:
1201 ; GCN2: ; %bb.0: ; %entry
1202 ; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
1203 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1204 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
1205 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
1206 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
1207 ; GCN2-NEXT: s_add_u32 s0, s0, s4
1208 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
1209 ; GCN2-NEXT: s_add_u32 s0, s0, 32
1210 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
1211 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
1212 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
1213 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1214 ; GCN2-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc
1215 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1216 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
1217 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
1218 ; GCN2-NEXT: s_waitcnt vmcnt(0)
1219 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
1220 ; GCN2-NEXT: s_endpgm
1222 %ptr = getelementptr i64, ptr %out, i64 %index
1223 %gep = getelementptr i64, ptr %ptr, i64 4
1224 %tmp0 = atomicrmw volatile max ptr %gep, i64 %in syncscope("workgroup") seq_cst
1225 store i64 %tmp0, ptr %out2
1229 define amdgpu_kernel void @atomic_max_i64(ptr %out, i64 %in) {
1230 ; GCN1-LABEL: atomic_max_i64:
1231 ; GCN1: ; %bb.0: ; %entry
1232 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
1233 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1234 ; GCN1-NEXT: v_mov_b32_e32 v0, s0
1235 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
1236 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
1237 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
1238 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1239 ; GCN1-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3]
1240 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1241 ; GCN1-NEXT: s_endpgm
1243 ; GCN2-LABEL: atomic_max_i64:
1244 ; GCN2: ; %bb.0: ; %entry
1245 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
1246 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1247 ; GCN2-NEXT: v_mov_b32_e32 v0, s0
1248 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
1249 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
1250 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
1251 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1252 ; GCN2-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3]
1253 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1254 ; GCN2-NEXT: s_endpgm
1256 %tmp0 = atomicrmw volatile max ptr %out, i64 %in syncscope("workgroup") seq_cst
1260 define amdgpu_kernel void @atomic_max_i64_ret(ptr %out, ptr %out2, i64 %in) {
1261 ; GCN1-LABEL: atomic_max_i64_ret:
1262 ; GCN1: ; %bb.0: ; %entry
1263 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
1264 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
1265 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1266 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
1267 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
1268 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
1269 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
1270 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1271 ; GCN1-NEXT: flat_atomic_smax_x2 v[0:1], v[0:1], v[2:3] glc
1272 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1273 ; GCN1-NEXT: v_mov_b32_e32 v2, s6
1274 ; GCN1-NEXT: v_mov_b32_e32 v3, s7
1275 ; GCN1-NEXT: s_waitcnt vmcnt(0)
1276 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
1277 ; GCN1-NEXT: s_endpgm
1279 ; GCN2-LABEL: atomic_max_i64_ret:
1280 ; GCN2: ; %bb.0: ; %entry
1281 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
1282 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
1283 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1284 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
1285 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
1286 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
1287 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
1288 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1289 ; GCN2-NEXT: flat_atomic_smax_x2 v[0:1], v[0:1], v[2:3] glc
1290 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1291 ; GCN2-NEXT: v_mov_b32_e32 v2, s6
1292 ; GCN2-NEXT: v_mov_b32_e32 v3, s7
1293 ; GCN2-NEXT: s_waitcnt vmcnt(0)
1294 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
1295 ; GCN2-NEXT: s_endpgm
1297 %tmp0 = atomicrmw volatile max ptr %out, i64 %in syncscope("workgroup") seq_cst
1298 store i64 %tmp0, ptr %out2
1302 define amdgpu_kernel void @atomic_max_i64_addr64(ptr %out, i64 %in, i64 %index) {
1303 ; GCN1-LABEL: atomic_max_i64_addr64:
1304 ; GCN1: ; %bb.0: ; %entry
1305 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
1306 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
1307 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1308 ; GCN1-NEXT: v_mov_b32_e32 v0, s6
1309 ; GCN1-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
1310 ; GCN1-NEXT: s_add_u32 s0, s4, s0
1311 ; GCN1-NEXT: s_addc_u32 s1, s5, s1
1312 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
1313 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
1314 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
1315 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1316 ; GCN1-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1]
1317 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1318 ; GCN1-NEXT: s_endpgm
1320 ; GCN2-LABEL: atomic_max_i64_addr64:
1321 ; GCN2: ; %bb.0: ; %entry
1322 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
1323 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
1324 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1325 ; GCN2-NEXT: v_mov_b32_e32 v0, s6
1326 ; GCN2-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
1327 ; GCN2-NEXT: s_add_u32 s0, s4, s0
1328 ; GCN2-NEXT: s_addc_u32 s1, s5, s1
1329 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
1330 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
1331 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
1332 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1333 ; GCN2-NEXT: flat_atomic_smax_x2 v[2:3], v[0:1]
1334 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1335 ; GCN2-NEXT: s_endpgm
1337 %ptr = getelementptr i64, ptr %out, i64 %index
1338 %tmp0 = atomicrmw volatile max ptr %ptr, i64 %in syncscope("workgroup") seq_cst
1342 define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr %out, ptr %out2, i64 %in, i64 %index) {
1343 ; GCN1-LABEL: atomic_max_i64_ret_addr64:
1344 ; GCN1: ; %bb.0: ; %entry
1345 ; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
1346 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1347 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
1348 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
1349 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
1350 ; GCN1-NEXT: s_add_u32 s0, s0, s4
1351 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
1352 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
1353 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
1354 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1355 ; GCN1-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc
1356 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1357 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
1358 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
1359 ; GCN1-NEXT: s_waitcnt vmcnt(0)
1360 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
1361 ; GCN1-NEXT: s_endpgm
1363 ; GCN2-LABEL: atomic_max_i64_ret_addr64:
1364 ; GCN2: ; %bb.0: ; %entry
1365 ; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
1366 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1367 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
1368 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
1369 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
1370 ; GCN2-NEXT: s_add_u32 s0, s0, s4
1371 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
1372 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
1373 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
1374 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1375 ; GCN2-NEXT: flat_atomic_smax_x2 v[0:1], v[2:3], v[0:1] glc
1376 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1377 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
1378 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
1379 ; GCN2-NEXT: s_waitcnt vmcnt(0)
1380 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
1381 ; GCN2-NEXT: s_endpgm
1383 %ptr = getelementptr i64, ptr %out, i64 %index
1384 %tmp0 = atomicrmw volatile max ptr %ptr, i64 %in syncscope("workgroup") seq_cst
1385 store i64 %tmp0, ptr %out2
1389 define amdgpu_kernel void @atomic_umax_i64_offset(ptr %out, i64 %in) {
1390 ; GCN1-LABEL: atomic_umax_i64_offset:
1391 ; GCN1: ; %bb.0: ; %entry
1392 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
1393 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1394 ; GCN1-NEXT: s_add_u32 s0, s0, 32
1395 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
1396 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
1397 ; GCN1-NEXT: v_mov_b32_e32 v0, s2
1398 ; GCN1-NEXT: v_mov_b32_e32 v1, s3
1399 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
1400 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1401 ; GCN1-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
1402 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1403 ; GCN1-NEXT: s_endpgm
1405 ; GCN2-LABEL: atomic_umax_i64_offset:
1406 ; GCN2: ; %bb.0: ; %entry
1407 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
1408 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1409 ; GCN2-NEXT: s_add_u32 s0, s0, 32
1410 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
1411 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
1412 ; GCN2-NEXT: v_mov_b32_e32 v0, s2
1413 ; GCN2-NEXT: v_mov_b32_e32 v1, s3
1414 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
1415 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1416 ; GCN2-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
1417 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1418 ; GCN2-NEXT: s_endpgm
1420 %gep = getelementptr i64, ptr %out, i64 4
1421 %tmp0 = atomicrmw volatile umax ptr %gep, i64 %in syncscope("workgroup") seq_cst
1425 define amdgpu_kernel void @atomic_umax_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
1426 ; GCN1-LABEL: atomic_umax_i64_ret_offset:
1427 ; GCN1: ; %bb.0: ; %entry
1428 ; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
1429 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
1430 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1431 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
1432 ; GCN1-NEXT: s_add_u32 s0, s0, 32
1433 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
1434 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
1435 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
1436 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
1437 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1438 ; GCN1-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
1439 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1440 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
1441 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
1442 ; GCN1-NEXT: s_waitcnt vmcnt(0)
1443 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
1444 ; GCN1-NEXT: s_endpgm
1446 ; GCN2-LABEL: atomic_umax_i64_ret_offset:
1447 ; GCN2: ; %bb.0: ; %entry
1448 ; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
1449 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
1450 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1451 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
1452 ; GCN2-NEXT: s_add_u32 s0, s0, 32
1453 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
1454 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
1455 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
1456 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
1457 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1458 ; GCN2-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
1459 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1460 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
1461 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
1462 ; GCN2-NEXT: s_waitcnt vmcnt(0)
1463 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
1464 ; GCN2-NEXT: s_endpgm
1466 %gep = getelementptr i64, ptr %out, i64 4
1467 %tmp0 = atomicrmw volatile umax ptr %gep, i64 %in syncscope("workgroup") seq_cst
1468 store i64 %tmp0, ptr %out2
1472 define amdgpu_kernel void @atomic_umax_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
1473 ; GCN1-LABEL: atomic_umax_i64_addr64_offset:
1474 ; GCN1: ; %bb.0: ; %entry
1475 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
1476 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
1477 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1478 ; GCN1-NEXT: v_mov_b32_e32 v0, s6
1479 ; GCN1-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
1480 ; GCN1-NEXT: s_add_u32 s0, s4, s0
1481 ; GCN1-NEXT: s_addc_u32 s1, s5, s1
1482 ; GCN1-NEXT: s_add_u32 s0, s0, 32
1483 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
1484 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
1485 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
1486 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
1487 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1488 ; GCN1-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
1489 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1490 ; GCN1-NEXT: s_endpgm
1492 ; GCN2-LABEL: atomic_umax_i64_addr64_offset:
1493 ; GCN2: ; %bb.0: ; %entry
1494 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
1495 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
1496 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1497 ; GCN2-NEXT: v_mov_b32_e32 v0, s6
1498 ; GCN2-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
1499 ; GCN2-NEXT: s_add_u32 s0, s4, s0
1500 ; GCN2-NEXT: s_addc_u32 s1, s5, s1
1501 ; GCN2-NEXT: s_add_u32 s0, s0, 32
1502 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
1503 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
1504 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
1505 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
1506 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1507 ; GCN2-NEXT: flat_atomic_umax_x2 v[2:3], v[0:1]
1508 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1509 ; GCN2-NEXT: s_endpgm
1511 %ptr = getelementptr i64, ptr %out, i64 %index
1512 %gep = getelementptr i64, ptr %ptr, i64 4
1513 %tmp0 = atomicrmw volatile umax ptr %gep, i64 %in syncscope("workgroup") seq_cst
1517 define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr %out, ptr %out2, i64 %in, i64 %index) {
1518 ; GCN1-LABEL: atomic_umax_i64_ret_addr64_offset:
1519 ; GCN1: ; %bb.0: ; %entry
1520 ; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
1521 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1522 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
1523 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
1524 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
1525 ; GCN1-NEXT: s_add_u32 s0, s0, s4
1526 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
1527 ; GCN1-NEXT: s_add_u32 s0, s0, 32
1528 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
1529 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
1530 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
1531 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1532 ; GCN1-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
1533 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1534 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
1535 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
1536 ; GCN1-NEXT: s_waitcnt vmcnt(0)
1537 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
1538 ; GCN1-NEXT: s_endpgm
1540 ; GCN2-LABEL: atomic_umax_i64_ret_addr64_offset:
1541 ; GCN2: ; %bb.0: ; %entry
1542 ; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
1543 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1544 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
1545 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
1546 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
1547 ; GCN2-NEXT: s_add_u32 s0, s0, s4
1548 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
1549 ; GCN2-NEXT: s_add_u32 s0, s0, 32
1550 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
1551 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
1552 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
1553 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1554 ; GCN2-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
1555 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1556 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
1557 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
1558 ; GCN2-NEXT: s_waitcnt vmcnt(0)
1559 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
1560 ; GCN2-NEXT: s_endpgm
1562 %ptr = getelementptr i64, ptr %out, i64 %index
1563 %gep = getelementptr i64, ptr %ptr, i64 4
1564 %tmp0 = atomicrmw volatile umax ptr %gep, i64 %in syncscope("workgroup") seq_cst
1565 store i64 %tmp0, ptr %out2
1569 define amdgpu_kernel void @atomic_umax_i64(ptr %out, i64 %in) {
1570 ; GCN1-LABEL: atomic_umax_i64:
1571 ; GCN1: ; %bb.0: ; %entry
1572 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
1573 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1574 ; GCN1-NEXT: v_mov_b32_e32 v0, s0
1575 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
1576 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
1577 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
1578 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1579 ; GCN1-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3]
1580 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1581 ; GCN1-NEXT: s_endpgm
1583 ; GCN2-LABEL: atomic_umax_i64:
1584 ; GCN2: ; %bb.0: ; %entry
1585 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
1586 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1587 ; GCN2-NEXT: v_mov_b32_e32 v0, s0
1588 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
1589 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
1590 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
1591 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1592 ; GCN2-NEXT: flat_atomic_umax_x2 v[0:1], v[2:3]
1593 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1594 ; GCN2-NEXT: s_endpgm
1596 %tmp0 = atomicrmw volatile umax ptr %out, i64 %in syncscope("workgroup") seq_cst
1600 define amdgpu_kernel void @atomic_umax_i64_ret(ptr %out, ptr %out2, i64 %in) {
1601 ; GCN1-LABEL: atomic_umax_i64_ret:
1602 ; GCN1: ; %bb.0: ; %entry
1603 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
1604 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
1605 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1606 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
1607 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
1608 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
1609 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
1610 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1611 ; GCN1-NEXT: flat_atomic_umax_x2 v[0:1], v[0:1], v[2:3] glc
1612 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
1613 ; GCN1-NEXT: v_mov_b32_e32 v2, s6
1614 ; GCN1-NEXT: v_mov_b32_e32 v3, s7
1615 ; GCN1-NEXT: s_waitcnt vmcnt(0)
1616 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
1617 ; GCN1-NEXT: s_endpgm
1619 ; GCN2-LABEL: atomic_umax_i64_ret:
1620 ; GCN2: ; %bb.0: ; %entry
1621 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
1622 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
1623 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1624 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
1625 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
1626 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
1627 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
1628 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1629 ; GCN2-NEXT: flat_atomic_umax_x2 v[0:1], v[0:1], v[2:3] glc
1630 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
1631 ; GCN2-NEXT: v_mov_b32_e32 v2, s6
1632 ; GCN2-NEXT: v_mov_b32_e32 v3, s7
1633 ; GCN2-NEXT: s_waitcnt vmcnt(0)
1634 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
1635 ; GCN2-NEXT: s_endpgm
1637 %tmp0 = atomicrmw volatile umax ptr %out, i64 %in syncscope("workgroup") seq_cst
1638 store i64 %tmp0, ptr %out2
define amdgpu_kernel void @atomic_umax_i64_addr64(ptr %out, i64 %in, i64 %index) {
; GCN1-LABEL: atomic_umax_i64_addr64:
; GCN1:       ; %bb.0: ; %entry
; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN1-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v0, s6
; GCN1-NEXT:    s_lshl_b64 s[0:1], s[0:1], 3
; GCN1-NEXT:    s_add_u32 s0, s4, s0
; GCN1-NEXT:    s_addc_u32 s1, s5, s1
; GCN1-NEXT:    v_mov_b32_e32 v3, s1
; GCN1-NEXT:    v_mov_b32_e32 v1, s7
; GCN1-NEXT:    v_mov_b32_e32 v2, s0
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    flat_atomic_umax_x2 v[2:3], v[0:1]
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    s_endpgm
;
; GCN2-LABEL: atomic_umax_i64_addr64:
; GCN2:       ; %bb.0: ; %entry
; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
; GCN2-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v0, s6
; GCN2-NEXT:    s_lshl_b64 s[0:1], s[0:1], 3
; GCN2-NEXT:    s_add_u32 s0, s4, s0
; GCN2-NEXT:    s_addc_u32 s1, s5, s1
; GCN2-NEXT:    v_mov_b32_e32 v3, s1
; GCN2-NEXT:    v_mov_b32_e32 v1, s7
; GCN2-NEXT:    v_mov_b32_e32 v2, s0
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    flat_atomic_umax_x2 v[2:3], v[0:1]
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    s_endpgm
entry:
  %ptr = getelementptr i64, ptr %out, i64 %index
  %tmp0 = atomicrmw volatile umax ptr %ptr, i64 %in syncscope("workgroup") seq_cst
  ret void
}
define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr %out, ptr %out2, i64 %in, i64 %index) {
; GCN1-LABEL: atomic_umax_i64_ret_addr64:
; GCN1:       ; %bb.0: ; %entry
; GCN1-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v0, s4
; GCN1-NEXT:    v_mov_b32_e32 v1, s5
; GCN1-NEXT:    s_lshl_b64 s[4:5], s[6:7], 3
; GCN1-NEXT:    s_add_u32 s0, s0, s4
; GCN1-NEXT:    s_addc_u32 s1, s1, s5
; GCN1-NEXT:    v_mov_b32_e32 v3, s1
; GCN1-NEXT:    v_mov_b32_e32 v2, s0
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v2, s2
; GCN1-NEXT:    v_mov_b32_e32 v3, s3
; GCN1-NEXT:    s_waitcnt vmcnt(0)
; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT:    s_endpgm
;
; GCN2-LABEL: atomic_umax_i64_ret_addr64:
; GCN2:       ; %bb.0: ; %entry
; GCN2-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v0, s4
; GCN2-NEXT:    v_mov_b32_e32 v1, s5
; GCN2-NEXT:    s_lshl_b64 s[4:5], s[6:7], 3
; GCN2-NEXT:    s_add_u32 s0, s0, s4
; GCN2-NEXT:    s_addc_u32 s1, s1, s5
; GCN2-NEXT:    v_mov_b32_e32 v3, s1
; GCN2-NEXT:    v_mov_b32_e32 v2, s0
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    flat_atomic_umax_x2 v[0:1], v[2:3], v[0:1] glc
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v2, s2
; GCN2-NEXT:    v_mov_b32_e32 v3, s3
; GCN2-NEXT:    s_waitcnt vmcnt(0)
; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT:    s_endpgm
entry:
  %ptr = getelementptr i64, ptr %out, i64 %index
  %tmp0 = atomicrmw volatile umax ptr %ptr, i64 %in syncscope("workgroup") seq_cst
  store i64 %tmp0, ptr %out2
  ret void
}
define amdgpu_kernel void @atomic_min_i64_offset(ptr %out, i64 %in) {
; GCN1-LABEL: atomic_min_i64_offset:
; GCN1:       ; %bb.0: ; %entry
; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    s_add_u32 s0, s0, 32
; GCN1-NEXT:    s_addc_u32 s1, s1, 0
; GCN1-NEXT:    v_mov_b32_e32 v3, s1
; GCN1-NEXT:    v_mov_b32_e32 v0, s2
; GCN1-NEXT:    v_mov_b32_e32 v1, s3
; GCN1-NEXT:    v_mov_b32_e32 v2, s0
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    flat_atomic_smin_x2 v[2:3], v[0:1]
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    s_endpgm
;
; GCN2-LABEL: atomic_min_i64_offset:
; GCN2:       ; %bb.0: ; %entry
; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    s_add_u32 s0, s0, 32
; GCN2-NEXT:    s_addc_u32 s1, s1, 0
; GCN2-NEXT:    v_mov_b32_e32 v3, s1
; GCN2-NEXT:    v_mov_b32_e32 v0, s2
; GCN2-NEXT:    v_mov_b32_e32 v1, s3
; GCN2-NEXT:    v_mov_b32_e32 v2, s0
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    flat_atomic_smin_x2 v[2:3], v[0:1]
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    s_endpgm
entry:
  %gep = getelementptr i64, ptr %out, i64 4
  %tmp0 = atomicrmw volatile min ptr %gep, i64 %in syncscope("workgroup") seq_cst
  ret void
}
define amdgpu_kernel void @atomic_min_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
; GCN1-LABEL: atomic_min_i64_ret_offset:
; GCN1:       ; %bb.0: ; %entry
; GCN1-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v0, s4
; GCN1-NEXT:    s_add_u32 s0, s0, 32
; GCN1-NEXT:    s_addc_u32 s1, s1, 0
; GCN1-NEXT:    v_mov_b32_e32 v3, s1
; GCN1-NEXT:    v_mov_b32_e32 v1, s5
; GCN1-NEXT:    v_mov_b32_e32 v2, s0
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v2, s2
; GCN1-NEXT:    v_mov_b32_e32 v3, s3
; GCN1-NEXT:    s_waitcnt vmcnt(0)
; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT:    s_endpgm
;
; GCN2-LABEL: atomic_min_i64_ret_offset:
; GCN2:       ; %bb.0: ; %entry
; GCN2-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v0, s4
; GCN2-NEXT:    s_add_u32 s0, s0, 32
; GCN2-NEXT:    s_addc_u32 s1, s1, 0
; GCN2-NEXT:    v_mov_b32_e32 v3, s1
; GCN2-NEXT:    v_mov_b32_e32 v1, s5
; GCN2-NEXT:    v_mov_b32_e32 v2, s0
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v2, s2
; GCN2-NEXT:    v_mov_b32_e32 v3, s3
; GCN2-NEXT:    s_waitcnt vmcnt(0)
; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT:    s_endpgm
entry:
  %gep = getelementptr i64, ptr %out, i64 4
  %tmp0 = atomicrmw volatile min ptr %gep, i64 %in syncscope("workgroup") seq_cst
  store i64 %tmp0, ptr %out2
  ret void
}
define amdgpu_kernel void @atomic_min_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
; GCN1-LABEL: atomic_min_i64_addr64_offset:
; GCN1:       ; %bb.0: ; %entry
; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN1-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v0, s6
; GCN1-NEXT:    s_lshl_b64 s[0:1], s[0:1], 3
; GCN1-NEXT:    s_add_u32 s0, s4, s0
; GCN1-NEXT:    s_addc_u32 s1, s5, s1
; GCN1-NEXT:    s_add_u32 s0, s0, 32
; GCN1-NEXT:    s_addc_u32 s1, s1, 0
; GCN1-NEXT:    v_mov_b32_e32 v3, s1
; GCN1-NEXT:    v_mov_b32_e32 v1, s7
; GCN1-NEXT:    v_mov_b32_e32 v2, s0
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    flat_atomic_smin_x2 v[2:3], v[0:1]
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    s_endpgm
;
; GCN2-LABEL: atomic_min_i64_addr64_offset:
; GCN2:       ; %bb.0: ; %entry
; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
; GCN2-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v0, s6
; GCN2-NEXT:    s_lshl_b64 s[0:1], s[0:1], 3
; GCN2-NEXT:    s_add_u32 s0, s4, s0
; GCN2-NEXT:    s_addc_u32 s1, s5, s1
; GCN2-NEXT:    s_add_u32 s0, s0, 32
; GCN2-NEXT:    s_addc_u32 s1, s1, 0
; GCN2-NEXT:    v_mov_b32_e32 v3, s1
; GCN2-NEXT:    v_mov_b32_e32 v1, s7
; GCN2-NEXT:    v_mov_b32_e32 v2, s0
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    flat_atomic_smin_x2 v[2:3], v[0:1]
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    s_endpgm
entry:
  %ptr = getelementptr i64, ptr %out, i64 %index
  %gep = getelementptr i64, ptr %ptr, i64 4
  %tmp0 = atomicrmw volatile min ptr %gep, i64 %in syncscope("workgroup") seq_cst
  ret void
}
define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr %out, ptr %out2, i64 %in, i64 %index) {
; GCN1-LABEL: atomic_min_i64_ret_addr64_offset:
; GCN1:       ; %bb.0: ; %entry
; GCN1-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v0, s4
; GCN1-NEXT:    v_mov_b32_e32 v1, s5
; GCN1-NEXT:    s_lshl_b64 s[4:5], s[6:7], 3
; GCN1-NEXT:    s_add_u32 s0, s0, s4
; GCN1-NEXT:    s_addc_u32 s1, s1, s5
; GCN1-NEXT:    s_add_u32 s0, s0, 32
; GCN1-NEXT:    s_addc_u32 s1, s1, 0
; GCN1-NEXT:    v_mov_b32_e32 v3, s1
; GCN1-NEXT:    v_mov_b32_e32 v2, s0
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v2, s2
; GCN1-NEXT:    v_mov_b32_e32 v3, s3
; GCN1-NEXT:    s_waitcnt vmcnt(0)
; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT:    s_endpgm
;
; GCN2-LABEL: atomic_min_i64_ret_addr64_offset:
; GCN2:       ; %bb.0: ; %entry
; GCN2-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v0, s4
; GCN2-NEXT:    v_mov_b32_e32 v1, s5
; GCN2-NEXT:    s_lshl_b64 s[4:5], s[6:7], 3
; GCN2-NEXT:    s_add_u32 s0, s0, s4
; GCN2-NEXT:    s_addc_u32 s1, s1, s5
; GCN2-NEXT:    s_add_u32 s0, s0, 32
; GCN2-NEXT:    s_addc_u32 s1, s1, 0
; GCN2-NEXT:    v_mov_b32_e32 v3, s1
; GCN2-NEXT:    v_mov_b32_e32 v2, s0
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v2, s2
; GCN2-NEXT:    v_mov_b32_e32 v3, s3
; GCN2-NEXT:    s_waitcnt vmcnt(0)
; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT:    s_endpgm
entry:
  %ptr = getelementptr i64, ptr %out, i64 %index
  %gep = getelementptr i64, ptr %ptr, i64 4
  %tmp0 = atomicrmw volatile min ptr %gep, i64 %in syncscope("workgroup") seq_cst
  store i64 %tmp0, ptr %out2
  ret void
}
define amdgpu_kernel void @atomic_min_i64(ptr %out, i64 %in) {
; GCN1-LABEL: atomic_min_i64:
; GCN1:       ; %bb.0: ; %entry
; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v0, s0
; GCN1-NEXT:    v_mov_b32_e32 v1, s1
; GCN1-NEXT:    v_mov_b32_e32 v2, s2
; GCN1-NEXT:    v_mov_b32_e32 v3, s3
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    flat_atomic_smin_x2 v[0:1], v[2:3]
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    s_endpgm
;
; GCN2-LABEL: atomic_min_i64:
; GCN2:       ; %bb.0: ; %entry
; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v0, s0
; GCN2-NEXT:    v_mov_b32_e32 v1, s1
; GCN2-NEXT:    v_mov_b32_e32 v2, s2
; GCN2-NEXT:    v_mov_b32_e32 v3, s3
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    flat_atomic_smin_x2 v[0:1], v[2:3]
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    s_endpgm
entry:
  %tmp0 = atomicrmw volatile min ptr %out, i64 %in syncscope("workgroup") seq_cst
  ret void
}
define amdgpu_kernel void @atomic_min_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-LABEL: atomic_min_i64_ret:
; GCN1:       ; %bb.0: ; %entry
; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN1-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v0, s4
; GCN1-NEXT:    v_mov_b32_e32 v1, s5
; GCN1-NEXT:    v_mov_b32_e32 v2, s0
; GCN1-NEXT:    v_mov_b32_e32 v3, s1
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    flat_atomic_smin_x2 v[0:1], v[0:1], v[2:3] glc
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v2, s6
; GCN1-NEXT:    v_mov_b32_e32 v3, s7
; GCN1-NEXT:    s_waitcnt vmcnt(0)
; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT:    s_endpgm
;
; GCN2-LABEL: atomic_min_i64_ret:
; GCN2:       ; %bb.0: ; %entry
; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
; GCN2-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v0, s4
; GCN2-NEXT:    v_mov_b32_e32 v1, s5
; GCN2-NEXT:    v_mov_b32_e32 v2, s0
; GCN2-NEXT:    v_mov_b32_e32 v3, s1
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    flat_atomic_smin_x2 v[0:1], v[0:1], v[2:3] glc
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v2, s6
; GCN2-NEXT:    v_mov_b32_e32 v3, s7
; GCN2-NEXT:    s_waitcnt vmcnt(0)
; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT:    s_endpgm
entry:
  %tmp0 = atomicrmw volatile min ptr %out, i64 %in syncscope("workgroup") seq_cst
  store i64 %tmp0, ptr %out2
  ret void
}
define amdgpu_kernel void @atomic_min_i64_addr64(ptr %out, i64 %in, i64 %index) {
; GCN1-LABEL: atomic_min_i64_addr64:
; GCN1:       ; %bb.0: ; %entry
; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN1-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v0, s6
; GCN1-NEXT:    s_lshl_b64 s[0:1], s[0:1], 3
; GCN1-NEXT:    s_add_u32 s0, s4, s0
; GCN1-NEXT:    s_addc_u32 s1, s5, s1
; GCN1-NEXT:    v_mov_b32_e32 v3, s1
; GCN1-NEXT:    v_mov_b32_e32 v1, s7
; GCN1-NEXT:    v_mov_b32_e32 v2, s0
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    flat_atomic_smin_x2 v[2:3], v[0:1]
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    s_endpgm
;
; GCN2-LABEL: atomic_min_i64_addr64:
; GCN2:       ; %bb.0: ; %entry
; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
; GCN2-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v0, s6
; GCN2-NEXT:    s_lshl_b64 s[0:1], s[0:1], 3
; GCN2-NEXT:    s_add_u32 s0, s4, s0
; GCN2-NEXT:    s_addc_u32 s1, s5, s1
; GCN2-NEXT:    v_mov_b32_e32 v3, s1
; GCN2-NEXT:    v_mov_b32_e32 v1, s7
; GCN2-NEXT:    v_mov_b32_e32 v2, s0
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    flat_atomic_smin_x2 v[2:3], v[0:1]
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    s_endpgm
entry:
  %ptr = getelementptr i64, ptr %out, i64 %index
  %tmp0 = atomicrmw volatile min ptr %ptr, i64 %in syncscope("workgroup") seq_cst
  ret void
}
define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr %out, ptr %out2, i64 %in, i64 %index) {
; GCN1-LABEL: atomic_min_i64_ret_addr64:
; GCN1:       ; %bb.0: ; %entry
; GCN1-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v0, s4
; GCN1-NEXT:    v_mov_b32_e32 v1, s5
; GCN1-NEXT:    s_lshl_b64 s[4:5], s[6:7], 3
; GCN1-NEXT:    s_add_u32 s0, s0, s4
; GCN1-NEXT:    s_addc_u32 s1, s1, s5
; GCN1-NEXT:    v_mov_b32_e32 v3, s1
; GCN1-NEXT:    v_mov_b32_e32 v2, s0
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v2, s2
; GCN1-NEXT:    v_mov_b32_e32 v3, s3
; GCN1-NEXT:    s_waitcnt vmcnt(0)
; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT:    s_endpgm
;
; GCN2-LABEL: atomic_min_i64_ret_addr64:
; GCN2:       ; %bb.0: ; %entry
; GCN2-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v0, s4
; GCN2-NEXT:    v_mov_b32_e32 v1, s5
; GCN2-NEXT:    s_lshl_b64 s[4:5], s[6:7], 3
; GCN2-NEXT:    s_add_u32 s0, s0, s4
; GCN2-NEXT:    s_addc_u32 s1, s1, s5
; GCN2-NEXT:    v_mov_b32_e32 v3, s1
; GCN2-NEXT:    v_mov_b32_e32 v2, s0
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    flat_atomic_smin_x2 v[0:1], v[2:3], v[0:1] glc
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v2, s2
; GCN2-NEXT:    v_mov_b32_e32 v3, s3
; GCN2-NEXT:    s_waitcnt vmcnt(0)
; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT:    s_endpgm
entry:
  %ptr = getelementptr i64, ptr %out, i64 %index
  %tmp0 = atomicrmw volatile min ptr %ptr, i64 %in syncscope("workgroup") seq_cst
  store i64 %tmp0, ptr %out2
  ret void
}
define amdgpu_kernel void @atomic_umin_i64_offset(ptr %out, i64 %in) {
; GCN1-LABEL: atomic_umin_i64_offset:
; GCN1:       ; %bb.0: ; %entry
; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    s_add_u32 s0, s0, 32
; GCN1-NEXT:    s_addc_u32 s1, s1, 0
; GCN1-NEXT:    v_mov_b32_e32 v3, s1
; GCN1-NEXT:    v_mov_b32_e32 v0, s2
; GCN1-NEXT:    v_mov_b32_e32 v1, s3
; GCN1-NEXT:    v_mov_b32_e32 v2, s0
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    flat_atomic_umin_x2 v[2:3], v[0:1]
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    s_endpgm
;
; GCN2-LABEL: atomic_umin_i64_offset:
; GCN2:       ; %bb.0: ; %entry
; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    s_add_u32 s0, s0, 32
; GCN2-NEXT:    s_addc_u32 s1, s1, 0
; GCN2-NEXT:    v_mov_b32_e32 v3, s1
; GCN2-NEXT:    v_mov_b32_e32 v0, s2
; GCN2-NEXT:    v_mov_b32_e32 v1, s3
; GCN2-NEXT:    v_mov_b32_e32 v2, s0
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    flat_atomic_umin_x2 v[2:3], v[0:1]
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    s_endpgm
entry:
  %gep = getelementptr i64, ptr %out, i64 4
  %tmp0 = atomicrmw volatile umin ptr %gep, i64 %in syncscope("workgroup") seq_cst
  ret void
}
define amdgpu_kernel void @atomic_umin_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
; GCN1-LABEL: atomic_umin_i64_ret_offset:
; GCN1:       ; %bb.0: ; %entry
; GCN1-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v0, s4
; GCN1-NEXT:    s_add_u32 s0, s0, 32
; GCN1-NEXT:    s_addc_u32 s1, s1, 0
; GCN1-NEXT:    v_mov_b32_e32 v3, s1
; GCN1-NEXT:    v_mov_b32_e32 v1, s5
; GCN1-NEXT:    v_mov_b32_e32 v2, s0
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v2, s2
; GCN1-NEXT:    v_mov_b32_e32 v3, s3
; GCN1-NEXT:    s_waitcnt vmcnt(0)
; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT:    s_endpgm
;
; GCN2-LABEL: atomic_umin_i64_ret_offset:
; GCN2:       ; %bb.0: ; %entry
; GCN2-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v0, s4
; GCN2-NEXT:    s_add_u32 s0, s0, 32
; GCN2-NEXT:    s_addc_u32 s1, s1, 0
; GCN2-NEXT:    v_mov_b32_e32 v3, s1
; GCN2-NEXT:    v_mov_b32_e32 v1, s5
; GCN2-NEXT:    v_mov_b32_e32 v2, s0
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v2, s2
; GCN2-NEXT:    v_mov_b32_e32 v3, s3
; GCN2-NEXT:    s_waitcnt vmcnt(0)
; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT:    s_endpgm
entry:
  %gep = getelementptr i64, ptr %out, i64 4
  %tmp0 = atomicrmw volatile umin ptr %gep, i64 %in syncscope("workgroup") seq_cst
  store i64 %tmp0, ptr %out2
  ret void
}
define amdgpu_kernel void @atomic_umin_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
; GCN1-LABEL: atomic_umin_i64_addr64_offset:
; GCN1:       ; %bb.0: ; %entry
; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN1-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v0, s6
; GCN1-NEXT:    s_lshl_b64 s[0:1], s[0:1], 3
; GCN1-NEXT:    s_add_u32 s0, s4, s0
; GCN1-NEXT:    s_addc_u32 s1, s5, s1
; GCN1-NEXT:    s_add_u32 s0, s0, 32
; GCN1-NEXT:    s_addc_u32 s1, s1, 0
; GCN1-NEXT:    v_mov_b32_e32 v3, s1
; GCN1-NEXT:    v_mov_b32_e32 v1, s7
; GCN1-NEXT:    v_mov_b32_e32 v2, s0
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    flat_atomic_umin_x2 v[2:3], v[0:1]
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    s_endpgm
;
; GCN2-LABEL: atomic_umin_i64_addr64_offset:
; GCN2:       ; %bb.0: ; %entry
; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
; GCN2-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v0, s6
; GCN2-NEXT:    s_lshl_b64 s[0:1], s[0:1], 3
; GCN2-NEXT:    s_add_u32 s0, s4, s0
; GCN2-NEXT:    s_addc_u32 s1, s5, s1
; GCN2-NEXT:    s_add_u32 s0, s0, 32
; GCN2-NEXT:    s_addc_u32 s1, s1, 0
; GCN2-NEXT:    v_mov_b32_e32 v3, s1
; GCN2-NEXT:    v_mov_b32_e32 v1, s7
; GCN2-NEXT:    v_mov_b32_e32 v2, s0
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    flat_atomic_umin_x2 v[2:3], v[0:1]
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    s_endpgm
entry:
  %ptr = getelementptr i64, ptr %out, i64 %index
  %gep = getelementptr i64, ptr %ptr, i64 4
  %tmp0 = atomicrmw volatile umin ptr %gep, i64 %in syncscope("workgroup") seq_cst
  ret void
}
define amdgpu_kernel void @atomic_umin_i64_ret_addr64_offset(ptr %out, ptr %out2, i64 %in, i64 %index) {
; GCN1-LABEL: atomic_umin_i64_ret_addr64_offset:
; GCN1:       ; %bb.0: ; %entry
; GCN1-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v0, s4
; GCN1-NEXT:    v_mov_b32_e32 v1, s5
; GCN1-NEXT:    s_lshl_b64 s[4:5], s[6:7], 3
; GCN1-NEXT:    s_add_u32 s0, s0, s4
; GCN1-NEXT:    s_addc_u32 s1, s1, s5
; GCN1-NEXT:    s_add_u32 s0, s0, 32
; GCN1-NEXT:    s_addc_u32 s1, s1, 0
; GCN1-NEXT:    v_mov_b32_e32 v3, s1
; GCN1-NEXT:    v_mov_b32_e32 v2, s0
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v2, s2
; GCN1-NEXT:    v_mov_b32_e32 v3, s3
; GCN1-NEXT:    s_waitcnt vmcnt(0)
; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT:    s_endpgm
;
; GCN2-LABEL: atomic_umin_i64_ret_addr64_offset:
; GCN2:       ; %bb.0: ; %entry
; GCN2-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v0, s4
; GCN2-NEXT:    v_mov_b32_e32 v1, s5
; GCN2-NEXT:    s_lshl_b64 s[4:5], s[6:7], 3
; GCN2-NEXT:    s_add_u32 s0, s0, s4
; GCN2-NEXT:    s_addc_u32 s1, s1, s5
; GCN2-NEXT:    s_add_u32 s0, s0, 32
; GCN2-NEXT:    s_addc_u32 s1, s1, 0
; GCN2-NEXT:    v_mov_b32_e32 v3, s1
; GCN2-NEXT:    v_mov_b32_e32 v2, s0
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v2, s2
; GCN2-NEXT:    v_mov_b32_e32 v3, s3
; GCN2-NEXT:    s_waitcnt vmcnt(0)
; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT:    s_endpgm
entry:
  %ptr = getelementptr i64, ptr %out, i64 %index
  %gep = getelementptr i64, ptr %ptr, i64 4
  %tmp0 = atomicrmw volatile umin ptr %gep, i64 %in syncscope("workgroup") seq_cst
  store i64 %tmp0, ptr %out2
  ret void
}
define amdgpu_kernel void @atomic_umin_i64(ptr %out, i64 %in) {
; GCN1-LABEL: atomic_umin_i64:
; GCN1:       ; %bb.0: ; %entry
; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v0, s0
; GCN1-NEXT:    v_mov_b32_e32 v1, s1
; GCN1-NEXT:    v_mov_b32_e32 v2, s2
; GCN1-NEXT:    v_mov_b32_e32 v3, s3
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    flat_atomic_umin_x2 v[0:1], v[2:3]
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    s_endpgm
;
; GCN2-LABEL: atomic_umin_i64:
; GCN2:       ; %bb.0: ; %entry
; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v0, s0
; GCN2-NEXT:    v_mov_b32_e32 v1, s1
; GCN2-NEXT:    v_mov_b32_e32 v2, s2
; GCN2-NEXT:    v_mov_b32_e32 v3, s3
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    flat_atomic_umin_x2 v[0:1], v[2:3]
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    s_endpgm
entry:
  %tmp0 = atomicrmw volatile umin ptr %out, i64 %in syncscope("workgroup") seq_cst
  ret void
}
define amdgpu_kernel void @atomic_umin_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-LABEL: atomic_umin_i64_ret:
; GCN1:       ; %bb.0: ; %entry
; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN1-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v0, s4
; GCN1-NEXT:    v_mov_b32_e32 v1, s5
; GCN1-NEXT:    v_mov_b32_e32 v2, s0
; GCN1-NEXT:    v_mov_b32_e32 v3, s1
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    flat_atomic_umin_x2 v[0:1], v[0:1], v[2:3] glc
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v2, s6
; GCN1-NEXT:    v_mov_b32_e32 v3, s7
; GCN1-NEXT:    s_waitcnt vmcnt(0)
; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT:    s_endpgm
;
; GCN2-LABEL: atomic_umin_i64_ret:
; GCN2:       ; %bb.0: ; %entry
; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
; GCN2-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v0, s4
; GCN2-NEXT:    v_mov_b32_e32 v1, s5
; GCN2-NEXT:    v_mov_b32_e32 v2, s0
; GCN2-NEXT:    v_mov_b32_e32 v3, s1
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    flat_atomic_umin_x2 v[0:1], v[0:1], v[2:3] glc
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v2, s6
; GCN2-NEXT:    v_mov_b32_e32 v3, s7
; GCN2-NEXT:    s_waitcnt vmcnt(0)
; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT:    s_endpgm
entry:
  %tmp0 = atomicrmw volatile umin ptr %out, i64 %in syncscope("workgroup") seq_cst
  store i64 %tmp0, ptr %out2
  ret void
}
define amdgpu_kernel void @atomic_umin_i64_addr64(ptr %out, i64 %in, i64 %index) {
; GCN1-LABEL: atomic_umin_i64_addr64:
; GCN1:       ; %bb.0: ; %entry
; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN1-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v0, s6
; GCN1-NEXT:    s_lshl_b64 s[0:1], s[0:1], 3
; GCN1-NEXT:    s_add_u32 s0, s4, s0
; GCN1-NEXT:    s_addc_u32 s1, s5, s1
; GCN1-NEXT:    v_mov_b32_e32 v3, s1
; GCN1-NEXT:    v_mov_b32_e32 v1, s7
; GCN1-NEXT:    v_mov_b32_e32 v2, s0
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    flat_atomic_umin_x2 v[2:3], v[0:1]
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    s_endpgm
;
; GCN2-LABEL: atomic_umin_i64_addr64:
; GCN2:       ; %bb.0: ; %entry
; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
; GCN2-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v0, s6
; GCN2-NEXT:    s_lshl_b64 s[0:1], s[0:1], 3
; GCN2-NEXT:    s_add_u32 s0, s4, s0
; GCN2-NEXT:    s_addc_u32 s1, s5, s1
; GCN2-NEXT:    v_mov_b32_e32 v3, s1
; GCN2-NEXT:    v_mov_b32_e32 v1, s7
; GCN2-NEXT:    v_mov_b32_e32 v2, s0
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    flat_atomic_umin_x2 v[2:3], v[0:1]
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    s_endpgm
entry:
  %ptr = getelementptr i64, ptr %out, i64 %index
  %tmp0 = atomicrmw volatile umin ptr %ptr, i64 %in syncscope("workgroup") seq_cst
  ret void
}
define amdgpu_kernel void @atomic_umin_i64_ret_addr64(ptr %out, ptr %out2, i64 %in, i64 %index) {
; GCN1-LABEL: atomic_umin_i64_ret_addr64:
; GCN1:       ; %bb.0: ; %entry
; GCN1-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x9
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v0, s4
; GCN1-NEXT:    v_mov_b32_e32 v1, s5
; GCN1-NEXT:    s_lshl_b64 s[4:5], s[6:7], 3
; GCN1-NEXT:    s_add_u32 s0, s0, s4
; GCN1-NEXT:    s_addc_u32 s1, s1, s5
; GCN1-NEXT:    v_mov_b32_e32 v3, s1
; GCN1-NEXT:    v_mov_b32_e32 v2, s0
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v2, s2
; GCN1-NEXT:    v_mov_b32_e32 v3, s3
; GCN1-NEXT:    s_waitcnt vmcnt(0)
; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT:    s_endpgm
;
; GCN2-LABEL: atomic_umin_i64_ret_addr64:
; GCN2:       ; %bb.0: ; %entry
; GCN2-NEXT:    s_load_dwordx8 s[0:7], s[0:1], 0x24
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v0, s4
; GCN2-NEXT:    v_mov_b32_e32 v1, s5
; GCN2-NEXT:    s_lshl_b64 s[4:5], s[6:7], 3
; GCN2-NEXT:    s_add_u32 s0, s0, s4
; GCN2-NEXT:    s_addc_u32 s1, s1, s5
; GCN2-NEXT:    v_mov_b32_e32 v3, s1
; GCN2-NEXT:    v_mov_b32_e32 v2, s0
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    flat_atomic_umin_x2 v[0:1], v[2:3], v[0:1] glc
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v2, s2
; GCN2-NEXT:    v_mov_b32_e32 v3, s3
; GCN2-NEXT:    s_waitcnt vmcnt(0)
; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT:    s_endpgm
entry:
  %ptr = getelementptr i64, ptr %out, i64 %index
  %tmp0 = atomicrmw volatile umin ptr %ptr, i64 %in syncscope("workgroup") seq_cst
  store i64 %tmp0, ptr %out2
  ret void
}
define amdgpu_kernel void @atomic_or_i64_offset(ptr %out, i64 %in) {
; GCN1-LABEL: atomic_or_i64_offset:
; GCN1:       ; %bb.0: ; %entry
; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    s_add_u32 s0, s0, 32
; GCN1-NEXT:    s_addc_u32 s1, s1, 0
; GCN1-NEXT:    v_mov_b32_e32 v3, s1
; GCN1-NEXT:    v_mov_b32_e32 v0, s2
; GCN1-NEXT:    v_mov_b32_e32 v1, s3
; GCN1-NEXT:    v_mov_b32_e32 v2, s0
; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT:    flat_atomic_or_x2 v[2:3], v[0:1]
; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT:    buffer_wbinvl1_vol
; GCN1-NEXT:    s_endpgm
;
; GCN2-LABEL: atomic_or_i64_offset:
; GCN2:       ; %bb.0: ; %entry
; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    s_add_u32 s0, s0, 32
; GCN2-NEXT:    s_addc_u32 s1, s1, 0
; GCN2-NEXT:    v_mov_b32_e32 v3, s1
; GCN2-NEXT:    v_mov_b32_e32 v0, s2
; GCN2-NEXT:    v_mov_b32_e32 v1, s3
; GCN2-NEXT:    v_mov_b32_e32 v2, s0
; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT:    flat_atomic_or_x2 v[2:3], v[0:1]
; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT:    buffer_wbinvl1_vol
; GCN2-NEXT:    s_endpgm
entry:
  %gep = getelementptr i64, ptr %out, i64 4
  %tmp0 = atomicrmw volatile or ptr %gep, i64 %in syncscope("agent") seq_cst
  ret void
}
define amdgpu_kernel void @atomic_or_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
; GCN1-LABEL: atomic_or_i64_ret_offset:
; GCN1:       ; %bb.0: ; %entry
; GCN1-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0xd
; GCN1-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v0, s4
; GCN1-NEXT:    s_add_u32 s0, s0, 32
; GCN1-NEXT:    s_addc_u32 s1, s1, 0
; GCN1-NEXT:    v_mov_b32_e32 v3, s1
; GCN1-NEXT:    v_mov_b32_e32 v1, s5
; GCN1-NEXT:    v_mov_b32_e32 v2, s0
; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT:    flat_atomic_or_x2 v[0:1], v[2:3], v[0:1] glc
; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT:    buffer_wbinvl1_vol
; GCN1-NEXT:    v_mov_b32_e32 v2, s2
; GCN1-NEXT:    v_mov_b32_e32 v3, s3
; GCN1-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
; GCN1-NEXT:    s_endpgm
;
; GCN2-LABEL: atomic_or_i64_ret_offset:
; GCN2:       ; %bb.0: ; %entry
; GCN2-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x34
; GCN2-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x24
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v0, s4
; GCN2-NEXT:    s_add_u32 s0, s0, 32
; GCN2-NEXT:    s_addc_u32 s1, s1, 0
; GCN2-NEXT:    v_mov_b32_e32 v3, s1
; GCN2-NEXT:    v_mov_b32_e32 v1, s5
; GCN2-NEXT:    v_mov_b32_e32 v2, s0
; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT:    flat_atomic_or_x2 v[0:1], v[2:3], v[0:1] glc
; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT:    buffer_wbinvl1_vol
; GCN2-NEXT:    v_mov_b32_e32 v2, s2
; GCN2-NEXT:    v_mov_b32_e32 v3, s3
; GCN2-NEXT:    flat_store_dwordx2 v[2:3], v[0:1]
; GCN2-NEXT:    s_endpgm
entry:
  %gep = getelementptr i64, ptr %out, i64 4
  %tmp0 = atomicrmw volatile or ptr %gep, i64 %in syncscope("agent") seq_cst
  store i64 %tmp0, ptr %out2
  ret void
}
define amdgpu_kernel void @atomic_or_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
; GCN1-LABEL: atomic_or_i64_addr64_offset:
; GCN1:       ; %bb.0: ; %entry
; GCN1-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN1-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
; GCN1-NEXT:    s_waitcnt lgkmcnt(0)
; GCN1-NEXT:    v_mov_b32_e32 v0, s6
; GCN1-NEXT:    s_lshl_b64 s[0:1], s[0:1], 3
; GCN1-NEXT:    s_add_u32 s0, s4, s0
; GCN1-NEXT:    s_addc_u32 s1, s5, s1
; GCN1-NEXT:    s_add_u32 s0, s0, 32
; GCN1-NEXT:    s_addc_u32 s1, s1, 0
; GCN1-NEXT:    v_mov_b32_e32 v3, s1
; GCN1-NEXT:    v_mov_b32_e32 v1, s7
; GCN1-NEXT:    v_mov_b32_e32 v2, s0
; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT:    flat_atomic_or_x2 v[2:3], v[0:1]
; GCN1-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN1-NEXT:    buffer_wbinvl1_vol
; GCN1-NEXT:    s_endpgm
;
; GCN2-LABEL: atomic_or_i64_addr64_offset:
; GCN2:       ; %bb.0: ; %entry
; GCN2-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
; GCN2-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
; GCN2-NEXT:    s_waitcnt lgkmcnt(0)
; GCN2-NEXT:    v_mov_b32_e32 v0, s6
; GCN2-NEXT:    s_lshl_b64 s[0:1], s[0:1], 3
; GCN2-NEXT:    s_add_u32 s0, s4, s0
; GCN2-NEXT:    s_addc_u32 s1, s5, s1
; GCN2-NEXT:    s_add_u32 s0, s0, 32
; GCN2-NEXT:    s_addc_u32 s1, s1, 0
; GCN2-NEXT:    v_mov_b32_e32 v3, s1
; GCN2-NEXT:    v_mov_b32_e32 v1, s7
; GCN2-NEXT:    v_mov_b32_e32 v2, s0
; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT:    flat_atomic_or_x2 v[2:3], v[0:1]
; GCN2-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
; GCN2-NEXT:    buffer_wbinvl1_vol
; GCN2-NEXT:    s_endpgm
entry:
  %ptr = getelementptr i64, ptr %out, i64 %index
  %gep = getelementptr i64, ptr %ptr, i64 4
  %tmp0 = atomicrmw volatile or ptr %gep, i64 %in syncscope("agent") seq_cst
  ret void
}
; i64 atomicrmw 'or' combining runtime index (+%index*8), constant +32 offset,
; and a used return value (glc atomic + store to %out2). All eight kernarg
; dwords load in one s_load_dwordx8.
2541 define amdgpu_kernel void @atomic_or_i64_ret_addr64_offset(ptr %out, ptr %out2, i64 %in, i64 %index) {
2542 ; GCN1-LABEL: atomic_or_i64_ret_addr64_offset:
2543 ; GCN1: ; %bb.0: ; %entry
2544 ; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
2545 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
2546 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
2547 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
2548 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
2549 ; GCN1-NEXT: s_add_u32 s0, s0, s4
2550 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
2551 ; GCN1-NEXT: s_add_u32 s0, s0, 32
2552 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
2553 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
2554 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
2555 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2556 ; GCN1-NEXT: flat_atomic_or_x2 v[0:1], v[2:3], v[0:1] glc
2557 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2558 ; GCN1-NEXT: buffer_wbinvl1_vol
2559 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
2560 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
2561 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
2562 ; GCN1-NEXT: s_endpgm
2564 ; GCN2-LABEL: atomic_or_i64_ret_addr64_offset:
2565 ; GCN2: ; %bb.0: ; %entry
2566 ; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
2567 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
2568 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
2569 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
2570 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
2571 ; GCN2-NEXT: s_add_u32 s0, s0, s4
2572 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
2573 ; GCN2-NEXT: s_add_u32 s0, s0, 32
2574 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
2575 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
2576 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
2577 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2578 ; GCN2-NEXT: flat_atomic_or_x2 v[0:1], v[2:3], v[0:1] glc
2579 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2580 ; GCN2-NEXT: buffer_wbinvl1_vol
2581 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
2582 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
2583 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
2584 ; GCN2-NEXT: s_endpgm
2586 %ptr = getelementptr i64, ptr %out, i64 %index
2587 %gep = getelementptr i64, ptr %ptr, i64 4
2588 %tmp0 = atomicrmw volatile or ptr %gep, i64 %in syncscope("agent") seq_cst
2589 store i64 %tmp0, ptr %out2
; Baseline i64 atomicrmw 'or' directly on %out: no offset arithmetic, no
; returned value (no 'glc'), just the flat atomic plus cache invalidate.
2593 define amdgpu_kernel void @atomic_or_i64(ptr %out, i64 %in) {
2594 ; GCN1-LABEL: atomic_or_i64:
2595 ; GCN1: ; %bb.0: ; %entry
2596 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
2597 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
2598 ; GCN1-NEXT: v_mov_b32_e32 v0, s0
2599 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
2600 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
2601 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
2602 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2603 ; GCN1-NEXT: flat_atomic_or_x2 v[0:1], v[2:3]
2604 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2605 ; GCN1-NEXT: buffer_wbinvl1_vol
2606 ; GCN1-NEXT: s_endpgm
2608 ; GCN2-LABEL: atomic_or_i64:
2609 ; GCN2: ; %bb.0: ; %entry
2610 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
2611 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
2612 ; GCN2-NEXT: v_mov_b32_e32 v0, s0
2613 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
2614 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
2615 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
2616 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2617 ; GCN2-NEXT: flat_atomic_or_x2 v[0:1], v[2:3]
2618 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2619 ; GCN2-NEXT: buffer_wbinvl1_vol
2620 ; GCN2-NEXT: s_endpgm
2622 %tmp0 = atomicrmw volatile or ptr %out, i64 %in syncscope("agent") seq_cst
; i64 atomicrmw 'or' on %out with the old value returned ('glc') and stored
; to %out2; no address arithmetic.
2626 define amdgpu_kernel void @atomic_or_i64_ret(ptr %out, ptr %out2, i64 %in) {
2627 ; GCN1-LABEL: atomic_or_i64_ret:
2628 ; GCN1: ; %bb.0: ; %entry
2629 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
2630 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
2631 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
2632 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
2633 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
2634 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
2635 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
2636 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2637 ; GCN1-NEXT: flat_atomic_or_x2 v[0:1], v[0:1], v[2:3] glc
2638 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2639 ; GCN1-NEXT: buffer_wbinvl1_vol
2640 ; GCN1-NEXT: v_mov_b32_e32 v2, s6
2641 ; GCN1-NEXT: v_mov_b32_e32 v3, s7
2642 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
2643 ; GCN1-NEXT: s_endpgm
2645 ; GCN2-LABEL: atomic_or_i64_ret:
2646 ; GCN2: ; %bb.0: ; %entry
2647 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
2648 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
2649 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
2650 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
2651 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
2652 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
2653 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
2654 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2655 ; GCN2-NEXT: flat_atomic_or_x2 v[0:1], v[0:1], v[2:3] glc
2656 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2657 ; GCN2-NEXT: buffer_wbinvl1_vol
2658 ; GCN2-NEXT: v_mov_b32_e32 v2, s6
2659 ; GCN2-NEXT: v_mov_b32_e32 v3, s7
2660 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
2661 ; GCN2-NEXT: s_endpgm
2663 %tmp0 = atomicrmw volatile or ptr %out, i64 %in syncscope("agent") seq_cst
2664 store i64 %tmp0, ptr %out2
; i64 atomicrmw 'or' at %out + %index*8 (runtime index only, no constant
; offset); result unused, so no 'glc'.
2668 define amdgpu_kernel void @atomic_or_i64_addr64(ptr %out, i64 %in, i64 %index) {
2669 ; GCN1-LABEL: atomic_or_i64_addr64:
2670 ; GCN1: ; %bb.0: ; %entry
2671 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
2672 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
2673 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
2674 ; GCN1-NEXT: v_mov_b32_e32 v0, s6
2675 ; GCN1-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
2676 ; GCN1-NEXT: s_add_u32 s0, s4, s0
2677 ; GCN1-NEXT: s_addc_u32 s1, s5, s1
2678 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
2679 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
2680 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
2681 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2682 ; GCN1-NEXT: flat_atomic_or_x2 v[2:3], v[0:1]
2683 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2684 ; GCN1-NEXT: buffer_wbinvl1_vol
2685 ; GCN1-NEXT: s_endpgm
2687 ; GCN2-LABEL: atomic_or_i64_addr64:
2688 ; GCN2: ; %bb.0: ; %entry
2689 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
2690 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
2691 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
2692 ; GCN2-NEXT: v_mov_b32_e32 v0, s6
2693 ; GCN2-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
2694 ; GCN2-NEXT: s_add_u32 s0, s4, s0
2695 ; GCN2-NEXT: s_addc_u32 s1, s5, s1
2696 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
2697 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
2698 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
2699 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2700 ; GCN2-NEXT: flat_atomic_or_x2 v[2:3], v[0:1]
2701 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2702 ; GCN2-NEXT: buffer_wbinvl1_vol
2703 ; GCN2-NEXT: s_endpgm
2705 %ptr = getelementptr i64, ptr %out, i64 %index
2706 %tmp0 = atomicrmw volatile or ptr %ptr, i64 %in syncscope("agent") seq_cst
; i64 atomicrmw 'or' at %out + %index*8 with the old value returned ('glc')
; and stored to %out2; dwordx8 kernarg load covers both pointers, %in, %index.
2710 define amdgpu_kernel void @atomic_or_i64_ret_addr64(ptr %out, ptr %out2, i64 %in, i64 %index) {
2711 ; GCN1-LABEL: atomic_or_i64_ret_addr64:
2712 ; GCN1: ; %bb.0: ; %entry
2713 ; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
2714 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
2715 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
2716 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
2717 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
2718 ; GCN1-NEXT: s_add_u32 s0, s0, s4
2719 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
2720 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
2721 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
2722 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2723 ; GCN1-NEXT: flat_atomic_or_x2 v[0:1], v[2:3], v[0:1] glc
2724 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2725 ; GCN1-NEXT: buffer_wbinvl1_vol
2726 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
2727 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
2728 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
2729 ; GCN1-NEXT: s_endpgm
2731 ; GCN2-LABEL: atomic_or_i64_ret_addr64:
2732 ; GCN2: ; %bb.0: ; %entry
2733 ; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
2734 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
2735 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
2736 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
2737 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
2738 ; GCN2-NEXT: s_add_u32 s0, s0, s4
2739 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
2740 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
2741 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
2742 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2743 ; GCN2-NEXT: flat_atomic_or_x2 v[0:1], v[2:3], v[0:1] glc
2744 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2745 ; GCN2-NEXT: buffer_wbinvl1_vol
2746 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
2747 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
2748 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
2749 ; GCN2-NEXT: s_endpgm
2751 %ptr = getelementptr i64, ptr %out, i64 %index
2752 %tmp0 = atomicrmw volatile or ptr %ptr, i64 %in syncscope("agent") seq_cst
2753 store i64 %tmp0, ptr %out2
; i64 atomicrmw 'xchg' at constant +32-byte offset (gep i64 4); lowered to
; flat_atomic_swap_x2 with no 'glc' since the old value is unused.
2757 define amdgpu_kernel void @atomic_xchg_i64_offset(ptr %out, i64 %in) {
2758 ; GCN1-LABEL: atomic_xchg_i64_offset:
2759 ; GCN1: ; %bb.0: ; %entry
2760 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
2761 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
2762 ; GCN1-NEXT: s_add_u32 s0, s0, 32
2763 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
2764 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
2765 ; GCN1-NEXT: v_mov_b32_e32 v0, s2
2766 ; GCN1-NEXT: v_mov_b32_e32 v1, s3
2767 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
2768 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2769 ; GCN1-NEXT: flat_atomic_swap_x2 v[2:3], v[0:1]
2770 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2771 ; GCN1-NEXT: buffer_wbinvl1_vol
2772 ; GCN1-NEXT: s_endpgm
2774 ; GCN2-LABEL: atomic_xchg_i64_offset:
2775 ; GCN2: ; %bb.0: ; %entry
2776 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
2777 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
2778 ; GCN2-NEXT: s_add_u32 s0, s0, 32
2779 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
2780 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
2781 ; GCN2-NEXT: v_mov_b32_e32 v0, s2
2782 ; GCN2-NEXT: v_mov_b32_e32 v1, s3
2783 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
2784 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2785 ; GCN2-NEXT: flat_atomic_swap_x2 v[2:3], v[0:1]
2786 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2787 ; GCN2-NEXT: buffer_wbinvl1_vol
2788 ; GCN2-NEXT: s_endpgm
2790 %gep = getelementptr i64, ptr %out, i64 4
2791 %tmp0 = atomicrmw volatile xchg ptr %gep, i64 %in syncscope("agent") seq_cst
; Same as atomic_xchg_i64_offset but with a double payload: xchg is bitwise,
; so codegen is identical (flat_atomic_swap_x2 at +32).
2795 define amdgpu_kernel void @atomic_xchg_f64_offset(ptr %out, double %in) {
2796 ; GCN1-LABEL: atomic_xchg_f64_offset:
2797 ; GCN1: ; %bb.0: ; %entry
2798 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
2799 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
2800 ; GCN1-NEXT: s_add_u32 s0, s0, 32
2801 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
2802 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
2803 ; GCN1-NEXT: v_mov_b32_e32 v0, s2
2804 ; GCN1-NEXT: v_mov_b32_e32 v1, s3
2805 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
2806 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2807 ; GCN1-NEXT: flat_atomic_swap_x2 v[2:3], v[0:1]
2808 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2809 ; GCN1-NEXT: buffer_wbinvl1_vol
2810 ; GCN1-NEXT: s_endpgm
2812 ; GCN2-LABEL: atomic_xchg_f64_offset:
2813 ; GCN2: ; %bb.0: ; %entry
2814 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
2815 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
2816 ; GCN2-NEXT: s_add_u32 s0, s0, 32
2817 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
2818 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
2819 ; GCN2-NEXT: v_mov_b32_e32 v0, s2
2820 ; GCN2-NEXT: v_mov_b32_e32 v1, s3
2821 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
2822 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2823 ; GCN2-NEXT: flat_atomic_swap_x2 v[2:3], v[0:1]
2824 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2825 ; GCN2-NEXT: buffer_wbinvl1_vol
2826 ; GCN2-NEXT: s_endpgm
2828 %gep = getelementptr double, ptr %out, i64 4
2829 %tmp0 = atomicrmw volatile xchg ptr %gep, double %in syncscope("agent") seq_cst
; xchg with a pointer payload (gep ptr, i32 4 -> +32 bytes for 8-byte
; pointers); again lowers to the same 64-bit flat_atomic_swap_x2.
2833 define amdgpu_kernel void @atomic_xchg_pointer_offset(ptr %out, ptr %in) {
2834 ; GCN1-LABEL: atomic_xchg_pointer_offset:
2835 ; GCN1: ; %bb.0: ; %entry
2836 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
2837 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
2838 ; GCN1-NEXT: s_add_u32 s0, s0, 32
2839 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
2840 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
2841 ; GCN1-NEXT: v_mov_b32_e32 v0, s2
2842 ; GCN1-NEXT: v_mov_b32_e32 v1, s3
2843 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
2844 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2845 ; GCN1-NEXT: flat_atomic_swap_x2 v[2:3], v[0:1]
2846 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2847 ; GCN1-NEXT: buffer_wbinvl1_vol
2848 ; GCN1-NEXT: s_endpgm
2850 ; GCN2-LABEL: atomic_xchg_pointer_offset:
2851 ; GCN2: ; %bb.0: ; %entry
2852 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
2853 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
2854 ; GCN2-NEXT: s_add_u32 s0, s0, 32
2855 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
2856 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
2857 ; GCN2-NEXT: v_mov_b32_e32 v0, s2
2858 ; GCN2-NEXT: v_mov_b32_e32 v1, s3
2859 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
2860 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2861 ; GCN2-NEXT: flat_atomic_swap_x2 v[2:3], v[0:1]
2862 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2863 ; GCN2-NEXT: buffer_wbinvl1_vol
2864 ; GCN2-NEXT: s_endpgm
2866 %gep = getelementptr ptr, ptr %out, i32 4
2867 %val = atomicrmw volatile xchg ptr %gep, ptr %in syncscope("agent") seq_cst
; i64 xchg at +32 with the swapped-out value returned ('glc') and stored
; to %out2.
2871 define amdgpu_kernel void @atomic_xchg_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
2872 ; GCN1-LABEL: atomic_xchg_i64_ret_offset:
2873 ; GCN1: ; %bb.0: ; %entry
2874 ; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
2875 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
2876 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
2877 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
2878 ; GCN1-NEXT: s_add_u32 s0, s0, 32
2879 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
2880 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
2881 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
2882 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
2883 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2884 ; GCN1-NEXT: flat_atomic_swap_x2 v[0:1], v[2:3], v[0:1] glc
2885 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2886 ; GCN1-NEXT: buffer_wbinvl1_vol
2887 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
2888 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
2889 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
2890 ; GCN1-NEXT: s_endpgm
2892 ; GCN2-LABEL: atomic_xchg_i64_ret_offset:
2893 ; GCN2: ; %bb.0: ; %entry
2894 ; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
2895 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
2896 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
2897 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
2898 ; GCN2-NEXT: s_add_u32 s0, s0, 32
2899 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
2900 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
2901 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
2902 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
2903 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2904 ; GCN2-NEXT: flat_atomic_swap_x2 v[0:1], v[2:3], v[0:1] glc
2905 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2906 ; GCN2-NEXT: buffer_wbinvl1_vol
2907 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
2908 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
2909 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
2910 ; GCN2-NEXT: s_endpgm
2912 %gep = getelementptr i64, ptr %out, i64 4
2913 %tmp0 = atomicrmw volatile xchg ptr %gep, i64 %in syncscope("agent") seq_cst
2914 store i64 %tmp0, ptr %out2
; i64 xchg at %out + %index*8 + 32; old value unused (no 'glc').
2918 define amdgpu_kernel void @atomic_xchg_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
2919 ; GCN1-LABEL: atomic_xchg_i64_addr64_offset:
2920 ; GCN1: ; %bb.0: ; %entry
2921 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
2922 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
2923 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
2924 ; GCN1-NEXT: v_mov_b32_e32 v0, s6
2925 ; GCN1-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
2926 ; GCN1-NEXT: s_add_u32 s0, s4, s0
2927 ; GCN1-NEXT: s_addc_u32 s1, s5, s1
2928 ; GCN1-NEXT: s_add_u32 s0, s0, 32
2929 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
2930 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
2931 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
2932 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
2933 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2934 ; GCN1-NEXT: flat_atomic_swap_x2 v[2:3], v[0:1]
2935 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2936 ; GCN1-NEXT: buffer_wbinvl1_vol
2937 ; GCN1-NEXT: s_endpgm
2939 ; GCN2-LABEL: atomic_xchg_i64_addr64_offset:
2940 ; GCN2: ; %bb.0: ; %entry
2941 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
2942 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
2943 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
2944 ; GCN2-NEXT: v_mov_b32_e32 v0, s6
2945 ; GCN2-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
2946 ; GCN2-NEXT: s_add_u32 s0, s4, s0
2947 ; GCN2-NEXT: s_addc_u32 s1, s5, s1
2948 ; GCN2-NEXT: s_add_u32 s0, s0, 32
2949 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
2950 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
2951 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
2952 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
2953 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2954 ; GCN2-NEXT: flat_atomic_swap_x2 v[2:3], v[0:1]
2955 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2956 ; GCN2-NEXT: buffer_wbinvl1_vol
2957 ; GCN2-NEXT: s_endpgm
2959 %ptr = getelementptr i64, ptr %out, i64 %index
2960 %gep = getelementptr i64, ptr %ptr, i64 4
2961 %tmp0 = atomicrmw volatile xchg ptr %gep, i64 %in syncscope("agent") seq_cst
; i64 xchg at %out + %index*8 + 32 with the old value returned ('glc') and
; stored to %out2; kernargs in one s_load_dwordx8.
2965 define amdgpu_kernel void @atomic_xchg_i64_ret_addr64_offset(ptr %out, ptr %out2, i64 %in, i64 %index) {
2966 ; GCN1-LABEL: atomic_xchg_i64_ret_addr64_offset:
2967 ; GCN1: ; %bb.0: ; %entry
2968 ; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
2969 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
2970 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
2971 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
2972 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
2973 ; GCN1-NEXT: s_add_u32 s0, s0, s4
2974 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
2975 ; GCN1-NEXT: s_add_u32 s0, s0, 32
2976 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
2977 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
2978 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
2979 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2980 ; GCN1-NEXT: flat_atomic_swap_x2 v[0:1], v[2:3], v[0:1] glc
2981 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
2982 ; GCN1-NEXT: buffer_wbinvl1_vol
2983 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
2984 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
2985 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
2986 ; GCN1-NEXT: s_endpgm
2988 ; GCN2-LABEL: atomic_xchg_i64_ret_addr64_offset:
2989 ; GCN2: ; %bb.0: ; %entry
2990 ; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
2991 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
2992 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
2993 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
2994 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
2995 ; GCN2-NEXT: s_add_u32 s0, s0, s4
2996 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
2997 ; GCN2-NEXT: s_add_u32 s0, s0, 32
2998 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
2999 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
3000 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
3001 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3002 ; GCN2-NEXT: flat_atomic_swap_x2 v[0:1], v[2:3], v[0:1] glc
3003 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3004 ; GCN2-NEXT: buffer_wbinvl1_vol
3005 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
3006 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
3007 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3008 ; GCN2-NEXT: s_endpgm
3010 %ptr = getelementptr i64, ptr %out, i64 %index
3011 %gep = getelementptr i64, ptr %ptr, i64 4
3012 %tmp0 = atomicrmw volatile xchg ptr %gep, i64 %in syncscope("agent") seq_cst
3013 store i64 %tmp0, ptr %out2
; Baseline i64 xchg directly on %out; result unused, no address math.
3017 define amdgpu_kernel void @atomic_xchg_i64(ptr %out, i64 %in) {
3018 ; GCN1-LABEL: atomic_xchg_i64:
3019 ; GCN1: ; %bb.0: ; %entry
3020 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
3021 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3022 ; GCN1-NEXT: v_mov_b32_e32 v0, s0
3023 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
3024 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
3025 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
3026 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3027 ; GCN1-NEXT: flat_atomic_swap_x2 v[0:1], v[2:3]
3028 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3029 ; GCN1-NEXT: buffer_wbinvl1_vol
3030 ; GCN1-NEXT: s_endpgm
3032 ; GCN2-LABEL: atomic_xchg_i64:
3033 ; GCN2: ; %bb.0: ; %entry
3034 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
3035 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
3036 ; GCN2-NEXT: v_mov_b32_e32 v0, s0
3037 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
3038 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
3039 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
3040 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3041 ; GCN2-NEXT: flat_atomic_swap_x2 v[0:1], v[2:3]
3042 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3043 ; GCN2-NEXT: buffer_wbinvl1_vol
3044 ; GCN2-NEXT: s_endpgm
3046 %tmp0 = atomicrmw volatile xchg ptr %out, i64 %in syncscope("agent") seq_cst
; i64 xchg on %out with the swapped-out value returned ('glc') and stored
; to %out2.
3050 define amdgpu_kernel void @atomic_xchg_i64_ret(ptr %out, ptr %out2, i64 %in) {
3051 ; GCN1-LABEL: atomic_xchg_i64_ret:
3052 ; GCN1: ; %bb.0: ; %entry
3053 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
3054 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
3055 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3056 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
3057 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
3058 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
3059 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
3060 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3061 ; GCN1-NEXT: flat_atomic_swap_x2 v[0:1], v[0:1], v[2:3] glc
3062 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3063 ; GCN1-NEXT: buffer_wbinvl1_vol
3064 ; GCN1-NEXT: v_mov_b32_e32 v2, s6
3065 ; GCN1-NEXT: v_mov_b32_e32 v3, s7
3066 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3067 ; GCN1-NEXT: s_endpgm
3069 ; GCN2-LABEL: atomic_xchg_i64_ret:
3070 ; GCN2: ; %bb.0: ; %entry
3071 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
3072 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
3073 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
3074 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
3075 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
3076 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
3077 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
3078 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3079 ; GCN2-NEXT: flat_atomic_swap_x2 v[0:1], v[0:1], v[2:3] glc
3080 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3081 ; GCN2-NEXT: buffer_wbinvl1_vol
3082 ; GCN2-NEXT: v_mov_b32_e32 v2, s6
3083 ; GCN2-NEXT: v_mov_b32_e32 v3, s7
3084 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3085 ; GCN2-NEXT: s_endpgm
3087 %tmp0 = atomicrmw volatile xchg ptr %out, i64 %in syncscope("agent") seq_cst
3088 store i64 %tmp0, ptr %out2
; i64 xchg at %out + %index*8 (runtime index, no constant offset);
; result unused (no 'glc').
3092 define amdgpu_kernel void @atomic_xchg_i64_addr64(ptr %out, i64 %in, i64 %index) {
3093 ; GCN1-LABEL: atomic_xchg_i64_addr64:
3094 ; GCN1: ; %bb.0: ; %entry
3095 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
3096 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
3097 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3098 ; GCN1-NEXT: v_mov_b32_e32 v0, s6
3099 ; GCN1-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
3100 ; GCN1-NEXT: s_add_u32 s0, s4, s0
3101 ; GCN1-NEXT: s_addc_u32 s1, s5, s1
3102 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
3103 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
3104 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
3105 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3106 ; GCN1-NEXT: flat_atomic_swap_x2 v[2:3], v[0:1]
3107 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3108 ; GCN1-NEXT: buffer_wbinvl1_vol
3109 ; GCN1-NEXT: s_endpgm
3111 ; GCN2-LABEL: atomic_xchg_i64_addr64:
3112 ; GCN2: ; %bb.0: ; %entry
3113 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
3114 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
3115 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
3116 ; GCN2-NEXT: v_mov_b32_e32 v0, s6
3117 ; GCN2-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
3118 ; GCN2-NEXT: s_add_u32 s0, s4, s0
3119 ; GCN2-NEXT: s_addc_u32 s1, s5, s1
3120 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
3121 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
3122 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
3123 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3124 ; GCN2-NEXT: flat_atomic_swap_x2 v[2:3], v[0:1]
3125 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3126 ; GCN2-NEXT: buffer_wbinvl1_vol
3127 ; GCN2-NEXT: s_endpgm
3129 %ptr = getelementptr i64, ptr %out, i64 %index
3130 %tmp0 = atomicrmw volatile xchg ptr %ptr, i64 %in syncscope("agent") seq_cst
; i64 xchg at %out + %index*8 with the old value returned ('glc') and
; stored to %out2.
3134 define amdgpu_kernel void @atomic_xchg_i64_ret_addr64(ptr %out, ptr %out2, i64 %in, i64 %index) {
3135 ; GCN1-LABEL: atomic_xchg_i64_ret_addr64:
3136 ; GCN1: ; %bb.0: ; %entry
3137 ; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
3138 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3139 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
3140 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
3141 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
3142 ; GCN1-NEXT: s_add_u32 s0, s0, s4
3143 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
3144 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
3145 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
3146 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3147 ; GCN1-NEXT: flat_atomic_swap_x2 v[0:1], v[2:3], v[0:1] glc
3148 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3149 ; GCN1-NEXT: buffer_wbinvl1_vol
3150 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
3151 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
3152 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3153 ; GCN1-NEXT: s_endpgm
3155 ; GCN2-LABEL: atomic_xchg_i64_ret_addr64:
3156 ; GCN2: ; %bb.0: ; %entry
3157 ; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
3158 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
3159 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
3160 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
3161 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
3162 ; GCN2-NEXT: s_add_u32 s0, s0, s4
3163 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
3164 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
3165 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
3166 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3167 ; GCN2-NEXT: flat_atomic_swap_x2 v[0:1], v[2:3], v[0:1] glc
3168 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3169 ; GCN2-NEXT: buffer_wbinvl1_vol
3170 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
3171 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
3172 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3173 ; GCN2-NEXT: s_endpgm
3175 %ptr = getelementptr i64, ptr %out, i64 %index
3176 %tmp0 = atomicrmw volatile xchg ptr %ptr, i64 %in syncscope("agent") seq_cst
3177 store i64 %tmp0, ptr %out2
; i64 atomicrmw 'xor' at constant +32-byte offset; result unused (no 'glc').
3181 define amdgpu_kernel void @atomic_xor_i64_offset(ptr %out, i64 %in) {
3182 ; GCN1-LABEL: atomic_xor_i64_offset:
3183 ; GCN1: ; %bb.0: ; %entry
3184 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
3185 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3186 ; GCN1-NEXT: s_add_u32 s0, s0, 32
3187 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
3188 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
3189 ; GCN1-NEXT: v_mov_b32_e32 v0, s2
3190 ; GCN1-NEXT: v_mov_b32_e32 v1, s3
3191 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
3192 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3193 ; GCN1-NEXT: flat_atomic_xor_x2 v[2:3], v[0:1]
3194 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3195 ; GCN1-NEXT: buffer_wbinvl1_vol
3196 ; GCN1-NEXT: s_endpgm
3198 ; GCN2-LABEL: atomic_xor_i64_offset:
3199 ; GCN2: ; %bb.0: ; %entry
3200 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
3201 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
3202 ; GCN2-NEXT: s_add_u32 s0, s0, 32
3203 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
3204 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
3205 ; GCN2-NEXT: v_mov_b32_e32 v0, s2
3206 ; GCN2-NEXT: v_mov_b32_e32 v1, s3
3207 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
3208 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3209 ; GCN2-NEXT: flat_atomic_xor_x2 v[2:3], v[0:1]
3210 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3211 ; GCN2-NEXT: buffer_wbinvl1_vol
3212 ; GCN2-NEXT: s_endpgm
3214 %gep = getelementptr i64, ptr %out, i64 4
3215 %tmp0 = atomicrmw volatile xor ptr %gep, i64 %in syncscope("agent") seq_cst
; i64 atomicrmw 'xor' at +32 with the old value returned ('glc') and stored
; to %out2.
3219 define amdgpu_kernel void @atomic_xor_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
3220 ; GCN1-LABEL: atomic_xor_i64_ret_offset:
3221 ; GCN1: ; %bb.0: ; %entry
3222 ; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
3223 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
3224 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3225 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
3226 ; GCN1-NEXT: s_add_u32 s0, s0, 32
3227 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
3228 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
3229 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
3230 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
3231 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3232 ; GCN1-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3], v[0:1] glc
3233 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3234 ; GCN1-NEXT: buffer_wbinvl1_vol
3235 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
3236 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
3237 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3238 ; GCN1-NEXT: s_endpgm
3240 ; GCN2-LABEL: atomic_xor_i64_ret_offset:
3241 ; GCN2: ; %bb.0: ; %entry
3242 ; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
3243 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
3244 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
3245 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
3246 ; GCN2-NEXT: s_add_u32 s0, s0, 32
3247 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
3248 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
3249 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
3250 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
3251 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3252 ; GCN2-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3], v[0:1] glc
3253 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3254 ; GCN2-NEXT: buffer_wbinvl1_vol
3255 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
3256 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
3257 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3258 ; GCN2-NEXT: s_endpgm
3260 %gep = getelementptr i64, ptr %out, i64 4
3261 %tmp0 = atomicrmw volatile xor ptr %gep, i64 %in syncscope("agent") seq_cst
3262 store i64 %tmp0, ptr %out2
; i64 atomicrmw 'xor' at %out + %index*8 + 32; result unused (no 'glc').
3266 define amdgpu_kernel void @atomic_xor_i64_addr64_offset(ptr %out, i64 %in, i64 %index) {
3267 ; GCN1-LABEL: atomic_xor_i64_addr64_offset:
3268 ; GCN1: ; %bb.0: ; %entry
3269 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
3270 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
3271 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3272 ; GCN1-NEXT: v_mov_b32_e32 v0, s6
3273 ; GCN1-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
3274 ; GCN1-NEXT: s_add_u32 s0, s4, s0
3275 ; GCN1-NEXT: s_addc_u32 s1, s5, s1
3276 ; GCN1-NEXT: s_add_u32 s0, s0, 32
3277 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
3278 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
3279 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
3280 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
3281 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3282 ; GCN1-NEXT: flat_atomic_xor_x2 v[2:3], v[0:1]
3283 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3284 ; GCN1-NEXT: buffer_wbinvl1_vol
3285 ; GCN1-NEXT: s_endpgm
3287 ; GCN2-LABEL: atomic_xor_i64_addr64_offset:
3288 ; GCN2: ; %bb.0: ; %entry
3289 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
3290 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
3291 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
3292 ; GCN2-NEXT: v_mov_b32_e32 v0, s6
3293 ; GCN2-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
3294 ; GCN2-NEXT: s_add_u32 s0, s4, s0
3295 ; GCN2-NEXT: s_addc_u32 s1, s5, s1
3296 ; GCN2-NEXT: s_add_u32 s0, s0, 32
3297 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
3298 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
3299 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
3300 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
3301 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3302 ; GCN2-NEXT: flat_atomic_xor_x2 v[2:3], v[0:1]
3303 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3304 ; GCN2-NEXT: buffer_wbinvl1_vol
3305 ; GCN2-NEXT: s_endpgm
3307 %ptr = getelementptr i64, ptr %out, i64 %index
3308 %gep = getelementptr i64, ptr %ptr, i64 4
3309 %tmp0 = atomicrmw volatile xor ptr %gep, i64 %in syncscope("agent") seq_cst
; i64 atomicrmw 'xor' at %out + %index*8 + 32 with the old value returned
; ('glc') and stored to %out2; kernargs in one s_load_dwordx8.
3313 define amdgpu_kernel void @atomic_xor_i64_ret_addr64_offset(ptr %out, ptr %out2, i64 %in, i64 %index) {
3314 ; GCN1-LABEL: atomic_xor_i64_ret_addr64_offset:
3315 ; GCN1: ; %bb.0: ; %entry
3316 ; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
3317 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3318 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
3319 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
3320 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
3321 ; GCN1-NEXT: s_add_u32 s0, s0, s4
3322 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
3323 ; GCN1-NEXT: s_add_u32 s0, s0, 32
3324 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
3325 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
3326 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
3327 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3328 ; GCN1-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3], v[0:1] glc
3329 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3330 ; GCN1-NEXT: buffer_wbinvl1_vol
3331 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
3332 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
3333 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3334 ; GCN1-NEXT: s_endpgm
3336 ; GCN2-LABEL: atomic_xor_i64_ret_addr64_offset:
3337 ; GCN2: ; %bb.0: ; %entry
3338 ; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
3339 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
3340 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
3341 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
3342 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
3343 ; GCN2-NEXT: s_add_u32 s0, s0, s4
3344 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
3345 ; GCN2-NEXT: s_add_u32 s0, s0, 32
3346 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
3347 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
3348 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
3349 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3350 ; GCN2-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3], v[0:1] glc
3351 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3352 ; GCN2-NEXT: buffer_wbinvl1_vol
3353 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
3354 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
3355 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3356 ; GCN2-NEXT: s_endpgm
3358 %ptr = getelementptr i64, ptr %out, i64 %index
3359 %gep = getelementptr i64, ptr %ptr, i64 4
3360 %tmp0 = atomicrmw volatile xor ptr %gep, i64 %in syncscope("agent") seq_cst
3361 store i64 %tmp0, ptr %out2
; Baseline case: volatile i64 xor RMW directly on %out (no index, no offset), agent
; scope seq_cst; result unused, so non-glc flat_atomic_xor_x2 plus cache invalidate.
3365 define amdgpu_kernel void @atomic_xor_i64(ptr %out, i64 %in) {
3366 ; GCN1-LABEL: atomic_xor_i64:
3367 ; GCN1: ; %bb.0: ; %entry
3368 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
3369 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3370 ; GCN1-NEXT: v_mov_b32_e32 v0, s0
3371 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
3372 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
3373 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
3374 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3375 ; GCN1-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3]
3376 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3377 ; GCN1-NEXT: buffer_wbinvl1_vol
3378 ; GCN1-NEXT: s_endpgm
3380 ; GCN2-LABEL: atomic_xor_i64:
3381 ; GCN2: ; %bb.0: ; %entry
3382 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
3383 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
3384 ; GCN2-NEXT: v_mov_b32_e32 v0, s0
3385 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
3386 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
3387 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
3388 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3389 ; GCN2-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3]
3390 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3391 ; GCN2-NEXT: buffer_wbinvl1_vol
3392 ; GCN2-NEXT: s_endpgm
3394 %tmp0 = atomicrmw volatile xor ptr %out, i64 %in syncscope("agent") seq_cst
; Returning variant of the baseline xor RMW: old value is stored to %out2, so the
; checks expect the glc form and a trailing flat_store_dwordx2.
3398 define amdgpu_kernel void @atomic_xor_i64_ret(ptr %out, ptr %out2, i64 %in) {
3399 ; GCN1-LABEL: atomic_xor_i64_ret:
3400 ; GCN1: ; %bb.0: ; %entry
3401 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
3402 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
3403 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3404 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
3405 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
3406 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
3407 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
3408 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3409 ; GCN1-NEXT: flat_atomic_xor_x2 v[0:1], v[0:1], v[2:3] glc
3410 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3411 ; GCN1-NEXT: buffer_wbinvl1_vol
3412 ; GCN1-NEXT: v_mov_b32_e32 v2, s6
3413 ; GCN1-NEXT: v_mov_b32_e32 v3, s7
3414 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3415 ; GCN1-NEXT: s_endpgm
3417 ; GCN2-LABEL: atomic_xor_i64_ret:
3418 ; GCN2: ; %bb.0: ; %entry
3419 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
3420 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
3421 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
3422 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
3423 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
3424 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
3425 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
3426 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3427 ; GCN2-NEXT: flat_atomic_xor_x2 v[0:1], v[0:1], v[2:3] glc
3428 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3429 ; GCN2-NEXT: buffer_wbinvl1_vol
3430 ; GCN2-NEXT: v_mov_b32_e32 v2, s6
3431 ; GCN2-NEXT: v_mov_b32_e32 v3, s7
3432 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3433 ; GCN2-NEXT: s_endpgm
3435 %tmp0 = atomicrmw volatile xor ptr %out, i64 %in syncscope("agent") seq_cst
3436 store i64 %tmp0, ptr %out2
; Indexed xor RMW at %out[%index] (no extra constant offset); the address is formed
; with s_lshl_b64 by 3 plus a 64-bit scalar add, then non-glc flat_atomic_xor_x2.
3440 define amdgpu_kernel void @atomic_xor_i64_addr64(ptr %out, i64 %in, i64 %index) {
3441 ; GCN1-LABEL: atomic_xor_i64_addr64:
3442 ; GCN1: ; %bb.0: ; %entry
3443 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
3444 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
3445 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3446 ; GCN1-NEXT: v_mov_b32_e32 v0, s6
3447 ; GCN1-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
3448 ; GCN1-NEXT: s_add_u32 s0, s4, s0
3449 ; GCN1-NEXT: s_addc_u32 s1, s5, s1
3450 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
3451 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
3452 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
3453 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3454 ; GCN1-NEXT: flat_atomic_xor_x2 v[2:3], v[0:1]
3455 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3456 ; GCN1-NEXT: buffer_wbinvl1_vol
3457 ; GCN1-NEXT: s_endpgm
3459 ; GCN2-LABEL: atomic_xor_i64_addr64:
3460 ; GCN2: ; %bb.0: ; %entry
3461 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
3462 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
3463 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
3464 ; GCN2-NEXT: v_mov_b32_e32 v0, s6
3465 ; GCN2-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
3466 ; GCN2-NEXT: s_add_u32 s0, s4, s0
3467 ; GCN2-NEXT: s_addc_u32 s1, s5, s1
3468 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
3469 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
3470 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
3471 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3472 ; GCN2-NEXT: flat_atomic_xor_x2 v[2:3], v[0:1]
3473 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3474 ; GCN2-NEXT: buffer_wbinvl1_vol
3475 ; GCN2-NEXT: s_endpgm
3477 %ptr = getelementptr i64, ptr %out, i64 %index
3478 %tmp0 = atomicrmw volatile xor ptr %ptr, i64 %in syncscope("agent") seq_cst
; Indexed xor RMW whose old value is stored to %out2: glc flat_atomic_xor_x2 then
; flat_store_dwordx2 of the returned pair.
3482 define amdgpu_kernel void @atomic_xor_i64_ret_addr64(ptr %out, ptr %out2, i64 %in, i64 %index) {
3483 ; GCN1-LABEL: atomic_xor_i64_ret_addr64:
3484 ; GCN1: ; %bb.0: ; %entry
3485 ; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
3486 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3487 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
3488 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
3489 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
3490 ; GCN1-NEXT: s_add_u32 s0, s0, s4
3491 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
3492 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
3493 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
3494 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3495 ; GCN1-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3], v[0:1] glc
3496 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3497 ; GCN1-NEXT: buffer_wbinvl1_vol
3498 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
3499 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
3500 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3501 ; GCN1-NEXT: s_endpgm
3503 ; GCN2-LABEL: atomic_xor_i64_ret_addr64:
3504 ; GCN2: ; %bb.0: ; %entry
3505 ; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
3506 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
3507 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
3508 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
3509 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
3510 ; GCN2-NEXT: s_add_u32 s0, s0, s4
3511 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
3512 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
3513 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
3514 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3515 ; GCN2-NEXT: flat_atomic_xor_x2 v[0:1], v[2:3], v[0:1] glc
3516 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3517 ; GCN2-NEXT: buffer_wbinvl1_vol
3518 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
3519 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
3520 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3521 ; GCN2-NEXT: s_endpgm
3523 %ptr = getelementptr i64, ptr %out, i64 %index
3524 %tmp0 = atomicrmw volatile xor ptr %ptr, i64 %in syncscope("agent") seq_cst
3525 store i64 %tmp0, ptr %out2
; seq_cst atomic i64 load from %in + 32 bytes, result stored (non-atomically) to %out;
; checks expect flat_load_dwordx2 with glc plus buffer_wbinvl1_vol after the load.
3529 define amdgpu_kernel void @atomic_load_i64_offset(ptr %in, ptr %out) {
3530 ; GCN1-LABEL: atomic_load_i64_offset:
3531 ; GCN1: ; %bb.0: ; %entry
3532 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
3533 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3534 ; GCN1-NEXT: s_add_u32 s0, s0, 32
3535 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
3536 ; GCN1-NEXT: v_mov_b32_e32 v0, s0
3537 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
3538 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3539 ; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc
3540 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3541 ; GCN1-NEXT: buffer_wbinvl1_vol
3542 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
3543 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
3544 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3545 ; GCN1-NEXT: s_endpgm
3547 ; GCN2-LABEL: atomic_load_i64_offset:
3548 ; GCN2: ; %bb.0: ; %entry
3549 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
3550 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
3551 ; GCN2-NEXT: s_add_u32 s0, s0, 32
3552 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
3553 ; GCN2-NEXT: v_mov_b32_e32 v0, s0
3554 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
3555 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3556 ; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc
3557 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3558 ; GCN2-NEXT: buffer_wbinvl1_vol
3559 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
3560 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
3561 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3562 ; GCN2-NEXT: s_endpgm
3564 %gep = getelementptr i64, ptr %in, i64 4
3565 %val = load atomic i64, ptr %gep seq_cst, align 8
3566 store i64 %val, ptr %out
; Baseline seq_cst atomic i64 load (agent syncscope in the IR, unlike the offset
; variant above) from %in, then plain store of the value to %out.
3570 define amdgpu_kernel void @atomic_load_i64(ptr %in, ptr %out) {
3571 ; GCN1-LABEL: atomic_load_i64:
3572 ; GCN1: ; %bb.0: ; %entry
3573 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
3574 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3575 ; GCN1-NEXT: v_mov_b32_e32 v0, s0
3576 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
3577 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3578 ; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc
3579 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3580 ; GCN1-NEXT: buffer_wbinvl1_vol
3581 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
3582 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
3583 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3584 ; GCN1-NEXT: s_endpgm
3586 ; GCN2-LABEL: atomic_load_i64:
3587 ; GCN2: ; %bb.0: ; %entry
3588 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
3589 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
3590 ; GCN2-NEXT: v_mov_b32_e32 v0, s0
3591 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
3592 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3593 ; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc
3594 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3595 ; GCN2-NEXT: buffer_wbinvl1_vol
3596 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
3597 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
3598 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3599 ; GCN2-NEXT: s_endpgm
3601 %val = load atomic i64, ptr %in syncscope("agent") seq_cst, align 8
3602 %store i64 %val, ptr %out
; seq_cst atomic i64 load from %in[%index] + 32 bytes; address formed by shift-by-3
; and 64-bit add, then glc flat_load_dwordx2 and store of the value to %out.
3606 define amdgpu_kernel void @atomic_load_i64_addr64_offset(ptr %in, ptr %out, i64 %index) {
3607 ; GCN1-LABEL: atomic_load_i64_addr64_offset:
3608 ; GCN1: ; %bb.0: ; %entry
3609 ; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
3610 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
3611 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3612 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
3613 ; GCN1-NEXT: s_add_u32 s0, s0, s4
3614 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
3615 ; GCN1-NEXT: s_add_u32 s0, s0, 32
3616 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
3617 ; GCN1-NEXT: v_mov_b32_e32 v0, s0
3618 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
3619 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3620 ; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc
3621 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3622 ; GCN1-NEXT: buffer_wbinvl1_vol
3623 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
3624 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
3625 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3626 ; GCN1-NEXT: s_endpgm
3628 ; GCN2-LABEL: atomic_load_i64_addr64_offset:
3629 ; GCN2: ; %bb.0: ; %entry
3630 ; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
3631 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
3632 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
3633 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
3634 ; GCN2-NEXT: s_add_u32 s0, s0, s4
3635 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
3636 ; GCN2-NEXT: s_add_u32 s0, s0, 32
3637 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
3638 ; GCN2-NEXT: v_mov_b32_e32 v0, s0
3639 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
3640 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3641 ; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc
3642 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3643 ; GCN2-NEXT: buffer_wbinvl1_vol
3644 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
3645 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
3646 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3647 ; GCN2-NEXT: s_endpgm
3649 %ptr = getelementptr i64, ptr %in, i64 %index
3650 %gep = getelementptr i64, ptr %ptr, i64 4
3651 %val = load atomic i64, ptr %gep seq_cst, align 8
3652 store i64 %val, ptr %out
; seq_cst atomic i64 load from %in[%index] (no extra offset); same codegen shape as
; the offset variant minus the constant +32 address add.
3656 define amdgpu_kernel void @atomic_load_i64_addr64(ptr %in, ptr %out, i64 %index) {
3657 ; GCN1-LABEL: atomic_load_i64_addr64:
3658 ; GCN1: ; %bb.0: ; %entry
3659 ; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
3660 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
3661 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3662 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
3663 ; GCN1-NEXT: s_add_u32 s0, s0, s4
3664 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
3665 ; GCN1-NEXT: v_mov_b32_e32 v0, s0
3666 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
3667 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3668 ; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc
3669 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3670 ; GCN1-NEXT: buffer_wbinvl1_vol
3671 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
3672 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
3673 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3674 ; GCN1-NEXT: s_endpgm
3676 ; GCN2-LABEL: atomic_load_i64_addr64:
3677 ; GCN2: ; %bb.0: ; %entry
3678 ; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
3679 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
3680 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
3681 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
3682 ; GCN2-NEXT: s_add_u32 s0, s0, s4
3683 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
3684 ; GCN2-NEXT: v_mov_b32_e32 v0, s0
3685 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
3686 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3687 ; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc
3688 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3689 ; GCN2-NEXT: buffer_wbinvl1_vol
3690 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
3691 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
3692 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3693 ; GCN2-NEXT: s_endpgm
3695 %ptr = getelementptr i64, ptr %in, i64 %index
3696 %val = load atomic i64, ptr %ptr seq_cst, align 8
3697 store i64 %val, ptr %out
; seq_cst atomic i64 store of %in to %out + 32 bytes; lowered to flat_store_dwordx2
; preceded by a full s_waitcnt (release ordering), no cache invalidate needed.
3701 define amdgpu_kernel void @atomic_store_i64_offset(i64 %in, ptr %out) {
3702 ; GCN1-LABEL: atomic_store_i64_offset:
3703 ; GCN1: ; %bb.0: ; %entry
3704 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
3705 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3706 ; GCN1-NEXT: v_mov_b32_e32 v0, s0
3707 ; GCN1-NEXT: s_add_u32 s0, s2, 32
3708 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
3709 ; GCN1-NEXT: s_addc_u32 s1, s3, 0
3710 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
3711 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
3712 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3713 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3714 ; GCN1-NEXT: s_endpgm
3716 ; GCN2-LABEL: atomic_store_i64_offset:
3717 ; GCN2: ; %bb.0: ; %entry
3718 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
3719 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
3720 ; GCN2-NEXT: v_mov_b32_e32 v0, s0
3721 ; GCN2-NEXT: s_add_u32 s0, s2, 32
3722 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
3723 ; GCN2-NEXT: s_addc_u32 s1, s3, 0
3724 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
3725 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
3726 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3727 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3728 ; GCN2-NEXT: s_endpgm
3730 %gep = getelementptr i64, ptr %out, i64 4
3731 store atomic i64 %in, ptr %gep seq_cst, align 8
; Baseline seq_cst atomic i64 store of %in directly to %out.
3735 define amdgpu_kernel void @atomic_store_i64(i64 %in, ptr %out) {
3736 ; GCN1-LABEL: atomic_store_i64:
3737 ; GCN1: ; %bb.0: ; %entry
3738 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
3739 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3740 ; GCN1-NEXT: v_mov_b32_e32 v0, s0
3741 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
3742 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
3743 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
3744 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3745 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3746 ; GCN1-NEXT: s_endpgm
3748 ; GCN2-LABEL: atomic_store_i64:
3749 ; GCN2: ; %bb.0: ; %entry
3750 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
3751 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
3752 ; GCN2-NEXT: v_mov_b32_e32 v0, s0
3753 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
3754 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
3755 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
3756 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3757 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3758 ; GCN2-NEXT: s_endpgm
3760 store atomic i64 %in, ptr %out seq_cst, align 8
; seq_cst atomic i64 store to %out[%index] + 32 bytes; index scaled by 8 via
; s_lshl_b64 and folded into the scalar address before flat_store_dwordx2.
3764 define amdgpu_kernel void @atomic_store_i64_addr64_offset(i64 %in, ptr %out, i64 %index) {
3765 ; GCN1-LABEL: atomic_store_i64_addr64_offset:
3766 ; GCN1: ; %bb.0: ; %entry
3767 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
3768 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
3769 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3770 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
3771 ; GCN1-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
3772 ; GCN1-NEXT: s_add_u32 s0, s6, s0
3773 ; GCN1-NEXT: s_addc_u32 s1, s7, s1
3774 ; GCN1-NEXT: s_add_u32 s0, s0, 32
3775 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
3776 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
3777 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
3778 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
3779 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3780 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3781 ; GCN1-NEXT: s_endpgm
3783 ; GCN2-LABEL: atomic_store_i64_addr64_offset:
3784 ; GCN2: ; %bb.0: ; %entry
3785 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
3786 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
3787 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
3788 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
3789 ; GCN2-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
3790 ; GCN2-NEXT: s_add_u32 s0, s6, s0
3791 ; GCN2-NEXT: s_addc_u32 s1, s7, s1
3792 ; GCN2-NEXT: s_add_u32 s0, s0, 32
3793 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
3794 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
3795 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
3796 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
3797 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3798 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3799 ; GCN2-NEXT: s_endpgm
3801 %ptr = getelementptr i64, ptr %out, i64 %index
3802 %gep = getelementptr i64, ptr %ptr, i64 4
3803 store atomic i64 %in, ptr %gep seq_cst, align 8
; seq_cst atomic i64 store to %out[%index] (no constant offset).
3807 define amdgpu_kernel void @atomic_store_i64_addr64(i64 %in, ptr %out, i64 %index) {
3808 ; GCN1-LABEL: atomic_store_i64_addr64:
3809 ; GCN1: ; %bb.0: ; %entry
3810 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
3811 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
3812 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3813 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
3814 ; GCN1-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
3815 ; GCN1-NEXT: s_add_u32 s0, s6, s0
3816 ; GCN1-NEXT: s_addc_u32 s1, s7, s1
3817 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
3818 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
3819 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
3820 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3821 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3822 ; GCN1-NEXT: s_endpgm
3824 ; GCN2-LABEL: atomic_store_i64_addr64:
3825 ; GCN2: ; %bb.0: ; %entry
3826 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
3827 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
3828 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
3829 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
3830 ; GCN2-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
3831 ; GCN2-NEXT: s_add_u32 s0, s6, s0
3832 ; GCN2-NEXT: s_addc_u32 s1, s7, s1
3833 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
3834 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
3835 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
3836 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3837 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3838 ; GCN2-NEXT: s_endpgm
3840 %ptr = getelementptr i64, ptr %out, i64 %index
3841 store atomic i64 %in, ptr %ptr seq_cst, align 8
; Volatile i64 cmpxchg (%old -> %in) at %out + 32 bytes, agent seq_cst/seq_cst;
; result unused, so the non-returning flat_atomic_cmpswap_x2 with the data/compare
; pair packed into v[0:3].
3845 define amdgpu_kernel void @atomic_cmpxchg_i64_offset(ptr %out, i64 %in, i64 %old) {
3846 ; GCN1-LABEL: atomic_cmpxchg_i64_offset:
3847 ; GCN1: ; %bb.0: ; %entry
3848 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
3849 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
3850 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3851 ; GCN1-NEXT: s_add_u32 s2, s4, 32
3852 ; GCN1-NEXT: s_addc_u32 s3, s5, 0
3853 ; GCN1-NEXT: v_mov_b32_e32 v5, s3
3854 ; GCN1-NEXT: v_mov_b32_e32 v0, s6
3855 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
3856 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
3857 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
3858 ; GCN1-NEXT: v_mov_b32_e32 v4, s2
3859 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3860 ; GCN1-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:3]
3861 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3862 ; GCN1-NEXT: buffer_wbinvl1_vol
3863 ; GCN1-NEXT: s_endpgm
3865 ; GCN2-LABEL: atomic_cmpxchg_i64_offset:
3866 ; GCN2: ; %bb.0: ; %entry
3867 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
3868 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
3869 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
3870 ; GCN2-NEXT: s_add_u32 s2, s4, 32
3871 ; GCN2-NEXT: s_addc_u32 s3, s5, 0
3872 ; GCN2-NEXT: v_mov_b32_e32 v5, s3
3873 ; GCN2-NEXT: v_mov_b32_e32 v0, s6
3874 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
3875 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
3876 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
3877 ; GCN2-NEXT: v_mov_b32_e32 v4, s2
3878 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3879 ; GCN2-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:3]
3880 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3881 ; GCN2-NEXT: buffer_wbinvl1_vol
3882 ; GCN2-NEXT: s_endpgm
3884 %gep = getelementptr i64, ptr %out, i64 4
3885 %val = cmpxchg volatile ptr %gep, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst
; Same cmpxchg but with a large element offset (gep index 9000 * 8 bytes = 72000 =
; 0x11940), checking the large byte offset is materialized into the scalar address.
3889 define amdgpu_kernel void @atomic_cmpxchg_i64_soffset(ptr %out, i64 %in, i64 %old) {
3890 ; GCN1-LABEL: atomic_cmpxchg_i64_soffset:
3891 ; GCN1: ; %bb.0: ; %entry
3892 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
3893 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
3894 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3895 ; GCN1-NEXT: s_add_u32 s2, s4, 0x11940
3896 ; GCN1-NEXT: s_addc_u32 s3, s5, 0
3897 ; GCN1-NEXT: v_mov_b32_e32 v5, s3
3898 ; GCN1-NEXT: v_mov_b32_e32 v0, s6
3899 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
3900 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
3901 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
3902 ; GCN1-NEXT: v_mov_b32_e32 v4, s2
3903 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3904 ; GCN1-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:3]
3905 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3906 ; GCN1-NEXT: buffer_wbinvl1_vol
3907 ; GCN1-NEXT: s_endpgm
3909 ; GCN2-LABEL: atomic_cmpxchg_i64_soffset:
3910 ; GCN2: ; %bb.0: ; %entry
3911 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
3912 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
3913 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
3914 ; GCN2-NEXT: s_add_u32 s2, s4, 0x11940
3915 ; GCN2-NEXT: s_addc_u32 s3, s5, 0
3916 ; GCN2-NEXT: v_mov_b32_e32 v5, s3
3917 ; GCN2-NEXT: v_mov_b32_e32 v0, s6
3918 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
3919 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
3920 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
3921 ; GCN2-NEXT: v_mov_b32_e32 v4, s2
3922 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3923 ; GCN2-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:3]
3924 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3925 ; GCN2-NEXT: buffer_wbinvl1_vol
3926 ; GCN2-NEXT: s_endpgm
3928 %gep = getelementptr i64, ptr %out, i64 9000
3929 %val = cmpxchg volatile ptr %gep, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst
; cmpxchg at %out + 32 bytes whose loaded (old) value is extracted and stored to
; %out2: returning (glc) flat_atomic_cmpswap_x2 followed by flat_store_dwordx2.
3933 define amdgpu_kernel void @atomic_cmpxchg_i64_ret_offset(ptr %out, ptr %out2, i64 %in, i64 %old) {
3934 ; GCN1-LABEL: atomic_cmpxchg_i64_ret_offset:
3935 ; GCN1: ; %bb.0: ; %entry
3936 ; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
3937 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3938 ; GCN1-NEXT: s_add_u32 s0, s0, 32
3939 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
3940 ; GCN1-NEXT: v_mov_b32_e32 v5, s1
3941 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
3942 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
3943 ; GCN1-NEXT: v_mov_b32_e32 v2, s6
3944 ; GCN1-NEXT: v_mov_b32_e32 v3, s7
3945 ; GCN1-NEXT: v_mov_b32_e32 v4, s0
3946 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3947 ; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
3948 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3949 ; GCN1-NEXT: buffer_wbinvl1_vol
3950 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
3951 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
3952 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3953 ; GCN1-NEXT: s_endpgm
3955 ; GCN2-LABEL: atomic_cmpxchg_i64_ret_offset:
3956 ; GCN2: ; %bb.0: ; %entry
3957 ; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
3958 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
3959 ; GCN2-NEXT: s_add_u32 s0, s0, 32
3960 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
3961 ; GCN2-NEXT: v_mov_b32_e32 v5, s1
3962 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
3963 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
3964 ; GCN2-NEXT: v_mov_b32_e32 v2, s6
3965 ; GCN2-NEXT: v_mov_b32_e32 v3, s7
3966 ; GCN2-NEXT: v_mov_b32_e32 v4, s0
3967 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3968 ; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
3969 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
3970 ; GCN2-NEXT: buffer_wbinvl1_vol
3971 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
3972 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
3973 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
3974 ; GCN2-NEXT: s_endpgm
3976 %gep = getelementptr i64, ptr %out, i64 4
3977 %val = cmpxchg volatile ptr %gep, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst
3978 %extract0 = extractvalue { i64, i1 } %val, 0
3979 store i64 %extract0, ptr %out2
; cmpxchg at %out[%index] + 32 bytes, result unused: indexed address formation plus
; the non-returning flat_atomic_cmpswap_x2.
3983 define amdgpu_kernel void @atomic_cmpxchg_i64_addr64_offset(ptr %out, i64 %in, i64 %index, i64 %old) {
3984 ; GCN1-LABEL: atomic_cmpxchg_i64_addr64_offset:
3985 ; GCN1: ; %bb.0: ; %entry
3986 ; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
3987 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
3988 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
3989 ; GCN1-NEXT: s_add_u32 s0, s0, s4
3990 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
3991 ; GCN1-NEXT: s_add_u32 s0, s0, 32
3992 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
3993 ; GCN1-NEXT: v_mov_b32_e32 v5, s1
3994 ; GCN1-NEXT: v_mov_b32_e32 v0, s2
3995 ; GCN1-NEXT: v_mov_b32_e32 v1, s3
3996 ; GCN1-NEXT: v_mov_b32_e32 v2, s6
3997 ; GCN1-NEXT: v_mov_b32_e32 v3, s7
3998 ; GCN1-NEXT: v_mov_b32_e32 v4, s0
3999 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4000 ; GCN1-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:3]
4001 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4002 ; GCN1-NEXT: buffer_wbinvl1_vol
4003 ; GCN1-NEXT: s_endpgm
4005 ; GCN2-LABEL: atomic_cmpxchg_i64_addr64_offset:
4006 ; GCN2: ; %bb.0: ; %entry
4007 ; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
4008 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
4009 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
4010 ; GCN2-NEXT: s_add_u32 s0, s0, s4
4011 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
4012 ; GCN2-NEXT: s_add_u32 s0, s0, 32
4013 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
4014 ; GCN2-NEXT: v_mov_b32_e32 v5, s1
4015 ; GCN2-NEXT: v_mov_b32_e32 v0, s2
4016 ; GCN2-NEXT: v_mov_b32_e32 v1, s3
4017 ; GCN2-NEXT: v_mov_b32_e32 v2, s6
4018 ; GCN2-NEXT: v_mov_b32_e32 v3, s7
4019 ; GCN2-NEXT: v_mov_b32_e32 v4, s0
4020 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4021 ; GCN2-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:3]
4022 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4023 ; GCN2-NEXT: buffer_wbinvl1_vol
4024 ; GCN2-NEXT: s_endpgm
4026 %ptr = getelementptr i64, ptr %out, i64 %index
4027 %gep = getelementptr i64, ptr %ptr, i64 4
4028 %val = cmpxchg volatile ptr %gep, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst
; Indexed+offset cmpxchg whose old value is stored to %out2; five i64 kernargs, so
; the checks load dwordx8 plus a separate dwordx2 for %old, then glc cmpswap.
4032 define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64_offset(ptr %out, ptr %out2, i64 %in, i64 %index, i64 %old) {
4033 ; GCN1-LABEL: atomic_cmpxchg_i64_ret_addr64_offset:
4034 ; GCN1: ; %bb.0: ; %entry
4035 ; GCN1-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x9
4036 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x11
4037 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
4038 ; GCN1-NEXT: s_lshl_b64 s[2:3], s[10:11], 3
4039 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
4040 ; GCN1-NEXT: s_add_u32 s0, s4, s2
4041 ; GCN1-NEXT: s_addc_u32 s3, s5, s3
4042 ; GCN1-NEXT: s_add_u32 s2, s0, 32
4043 ; GCN1-NEXT: s_addc_u32 s3, s3, 0
4044 ; GCN1-NEXT: v_mov_b32_e32 v5, s3
4045 ; GCN1-NEXT: v_mov_b32_e32 v0, s8
4046 ; GCN1-NEXT: v_mov_b32_e32 v1, s9
4047 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
4048 ; GCN1-NEXT: v_mov_b32_e32 v4, s2
4049 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4050 ; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
4051 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4052 ; GCN1-NEXT: buffer_wbinvl1_vol
4053 ; GCN1-NEXT: v_mov_b32_e32 v2, s6
4054 ; GCN1-NEXT: v_mov_b32_e32 v3, s7
4055 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4056 ; GCN1-NEXT: s_endpgm
4058 ; GCN2-LABEL: atomic_cmpxchg_i64_ret_addr64_offset:
4059 ; GCN2: ; %bb.0: ; %entry
4060 ; GCN2-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x24
4061 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x44
4062 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
4063 ; GCN2-NEXT: s_lshl_b64 s[2:3], s[10:11], 3
4064 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
4065 ; GCN2-NEXT: s_add_u32 s0, s4, s2
4066 ; GCN2-NEXT: s_addc_u32 s3, s5, s3
4067 ; GCN2-NEXT: s_add_u32 s2, s0, 32
4068 ; GCN2-NEXT: s_addc_u32 s3, s3, 0
4069 ; GCN2-NEXT: v_mov_b32_e32 v5, s3
4070 ; GCN2-NEXT: v_mov_b32_e32 v0, s8
4071 ; GCN2-NEXT: v_mov_b32_e32 v1, s9
4072 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
4073 ; GCN2-NEXT: v_mov_b32_e32 v4, s2
4074 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4075 ; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
4076 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4077 ; GCN2-NEXT: buffer_wbinvl1_vol
4078 ; GCN2-NEXT: v_mov_b32_e32 v2, s6
4079 ; GCN2-NEXT: v_mov_b32_e32 v3, s7
4080 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4081 ; GCN2-NEXT: s_endpgm
4083 %ptr = getelementptr i64, ptr %out, i64 %index
4084 %gep = getelementptr i64, ptr %ptr, i64 4
4085 %val = cmpxchg volatile ptr %gep, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst
4086 %extract0 = extractvalue { i64, i1 } %val, 0
4087 store i64 %extract0, ptr %out2
; Baseline volatile i64 cmpxchg directly on %out, result unused.
4091 define amdgpu_kernel void @atomic_cmpxchg_i64(ptr %out, i64 %in, i64 %old) {
4092 ; GCN1-LABEL: atomic_cmpxchg_i64:
4093 ; GCN1: ; %bb.0: ; %entry
4094 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
4095 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
4096 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
4097 ; GCN1-NEXT: v_mov_b32_e32 v4, s4
4098 ; GCN1-NEXT: v_mov_b32_e32 v5, s5
4099 ; GCN1-NEXT: v_mov_b32_e32 v0, s6
4100 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
4101 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
4102 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
4103 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4104 ; GCN1-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:3]
4105 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4106 ; GCN1-NEXT: buffer_wbinvl1_vol
4107 ; GCN1-NEXT: s_endpgm
4109 ; GCN2-LABEL: atomic_cmpxchg_i64:
4110 ; GCN2: ; %bb.0: ; %entry
4111 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
4112 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
4113 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
4114 ; GCN2-NEXT: v_mov_b32_e32 v4, s4
4115 ; GCN2-NEXT: v_mov_b32_e32 v5, s5
4116 ; GCN2-NEXT: v_mov_b32_e32 v0, s6
4117 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
4118 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
4119 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
4120 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4121 ; GCN2-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:3]
4122 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4123 ; GCN2-NEXT: buffer_wbinvl1_vol
4124 ; GCN2-NEXT: s_endpgm
4126 %val = cmpxchg volatile ptr %out, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst
; Returning variant of the baseline cmpxchg: old value extracted and stored to %out2
; via the glc flat_atomic_cmpswap_x2 plus flat_store_dwordx2.
4130 define amdgpu_kernel void @atomic_cmpxchg_i64_ret(ptr %out, ptr %out2, i64 %in, i64 %old) {
4131 ; GCN1-LABEL: atomic_cmpxchg_i64_ret:
4132 ; GCN1: ; %bb.0: ; %entry
4133 ; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
4134 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
4135 ; GCN1-NEXT: v_mov_b32_e32 v4, s0
4136 ; GCN1-NEXT: v_mov_b32_e32 v5, s1
4137 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
4138 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
4139 ; GCN1-NEXT: v_mov_b32_e32 v2, s6
4140 ; GCN1-NEXT: v_mov_b32_e32 v3, s7
4141 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4142 ; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
4143 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4144 ; GCN1-NEXT: buffer_wbinvl1_vol
4145 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
4146 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
4147 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4148 ; GCN1-NEXT: s_endpgm
4150 ; GCN2-LABEL: atomic_cmpxchg_i64_ret:
4151 ; GCN2: ; %bb.0: ; %entry
4152 ; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
4153 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
4154 ; GCN2-NEXT: v_mov_b32_e32 v4, s0
4155 ; GCN2-NEXT: v_mov_b32_e32 v5, s1
4156 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
4157 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
4158 ; GCN2-NEXT: v_mov_b32_e32 v2, s6
4159 ; GCN2-NEXT: v_mov_b32_e32 v3, s7
4160 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4161 ; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
4162 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4163 ; GCN2-NEXT: buffer_wbinvl1_vol
4164 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
4165 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
4166 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4167 ; GCN2-NEXT: s_endpgm
4169 %val = cmpxchg volatile ptr %out, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst
4170 %extract0 = extractvalue { i64, i1 } %val, 0
4171 store i64 %extract0, ptr %out2
; cmpxchg through a GEP with a runtime i64 index: the index is scaled by 8
; (s_lshl_b64 ..., 3) and added to the base with a 64-bit s_add_u32/s_addc_u32
; pair before the non-returning flat_atomic_cmpswap_x2.
4175 define amdgpu_kernel void @atomic_cmpxchg_i64_addr64(ptr %out, i64 %in, i64 %index, i64 %old) {
4176 ; GCN1-LABEL: atomic_cmpxchg_i64_addr64:
4177 ; GCN1: ; %bb.0: ; %entry
4178 ; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
4179 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
4180 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
4181 ; GCN1-NEXT: s_add_u32 s0, s0, s4
4182 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
4183 ; GCN1-NEXT: v_mov_b32_e32 v5, s1
4184 ; GCN1-NEXT: v_mov_b32_e32 v0, s2
4185 ; GCN1-NEXT: v_mov_b32_e32 v1, s3
4186 ; GCN1-NEXT: v_mov_b32_e32 v2, s6
4187 ; GCN1-NEXT: v_mov_b32_e32 v3, s7
4188 ; GCN1-NEXT: v_mov_b32_e32 v4, s0
4189 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4190 ; GCN1-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:3]
4191 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4192 ; GCN1-NEXT: buffer_wbinvl1_vol
4193 ; GCN1-NEXT: s_endpgm
4195 ; GCN2-LABEL: atomic_cmpxchg_i64_addr64:
4196 ; GCN2: ; %bb.0: ; %entry
4197 ; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
4198 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
4199 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
4200 ; GCN2-NEXT: s_add_u32 s0, s0, s4
4201 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
4202 ; GCN2-NEXT: v_mov_b32_e32 v5, s1
4203 ; GCN2-NEXT: v_mov_b32_e32 v0, s2
4204 ; GCN2-NEXT: v_mov_b32_e32 v1, s3
4205 ; GCN2-NEXT: v_mov_b32_e32 v2, s6
4206 ; GCN2-NEXT: v_mov_b32_e32 v3, s7
4207 ; GCN2-NEXT: v_mov_b32_e32 v4, s0
4208 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4209 ; GCN2-NEXT: flat_atomic_cmpswap_x2 v[4:5], v[0:3]
4210 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4211 ; GCN2-NEXT: buffer_wbinvl1_vol
4212 ; GCN2-NEXT: s_endpgm
4214 %ptr = getelementptr i64, ptr %out, i64 %index
4215 %val = cmpxchg volatile ptr %ptr, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst
; Combined addr64 + ret case: runtime-indexed address computation plus a glc
; cmpswap whose returned old value is stored to %out2.
4219 define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64(ptr %out, ptr %out2, i64 %in, i64 %index, i64 %old) {
4220 ; GCN1-LABEL: atomic_cmpxchg_i64_ret_addr64:
4221 ; GCN1: ; %bb.0: ; %entry
4222 ; GCN1-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x9
4223 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x11
4224 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
4225 ; GCN1-NEXT: s_lshl_b64 s[2:3], s[10:11], 3
4226 ; GCN1-NEXT: s_add_u32 s2, s4, s2
4227 ; GCN1-NEXT: s_addc_u32 s3, s5, s3
4228 ; GCN1-NEXT: v_mov_b32_e32 v5, s3
4229 ; GCN1-NEXT: v_mov_b32_e32 v0, s8
4230 ; GCN1-NEXT: v_mov_b32_e32 v1, s9
4231 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
4232 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
4233 ; GCN1-NEXT: v_mov_b32_e32 v4, s2
4234 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4235 ; GCN1-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
4236 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4237 ; GCN1-NEXT: buffer_wbinvl1_vol
4238 ; GCN1-NEXT: v_mov_b32_e32 v2, s6
4239 ; GCN1-NEXT: v_mov_b32_e32 v3, s7
4240 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4241 ; GCN1-NEXT: s_endpgm
4243 ; GCN2-LABEL: atomic_cmpxchg_i64_ret_addr64:
4244 ; GCN2: ; %bb.0: ; %entry
4245 ; GCN2-NEXT: s_load_dwordx8 s[4:11], s[0:1], 0x24
4246 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x44
4247 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
4248 ; GCN2-NEXT: s_lshl_b64 s[2:3], s[10:11], 3
4249 ; GCN2-NEXT: s_add_u32 s2, s4, s2
4250 ; GCN2-NEXT: s_addc_u32 s3, s5, s3
4251 ; GCN2-NEXT: v_mov_b32_e32 v5, s3
4252 ; GCN2-NEXT: v_mov_b32_e32 v0, s8
4253 ; GCN2-NEXT: v_mov_b32_e32 v1, s9
4254 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
4255 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
4256 ; GCN2-NEXT: v_mov_b32_e32 v4, s2
4257 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4258 ; GCN2-NEXT: flat_atomic_cmpswap_x2 v[0:1], v[4:5], v[0:3] glc
4259 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4260 ; GCN2-NEXT: buffer_wbinvl1_vol
4261 ; GCN2-NEXT: v_mov_b32_e32 v2, s6
4262 ; GCN2-NEXT: v_mov_b32_e32 v3, s7
4263 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4264 ; GCN2-NEXT: s_endpgm
4266 %ptr = getelementptr i64, ptr %out, i64 %index
4267 %val = cmpxchg volatile ptr %ptr, i64 %old, i64 %in syncscope("agent") seq_cst seq_cst
4268 %extract0 = extractvalue { i64, i1 } %val, 0
4269 store i64 %extract0, ptr %out2
; seq_cst atomic f64 load at a constant +32-byte offset: the offset is folded
; into the 64-bit scalar address (s_add_u32/s_addc_u32) and the load is a
; flat_load_dwordx2 glc followed by buffer_wbinvl1_vol, then a plain store of
; the result.
4273 define amdgpu_kernel void @atomic_load_f64_offset(ptr %in, ptr %out) {
4274 ; GCN1-LABEL: atomic_load_f64_offset:
4275 ; GCN1: ; %bb.0: ; %entry
4276 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
4277 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
4278 ; GCN1-NEXT: s_add_u32 s0, s0, 32
4279 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
4280 ; GCN1-NEXT: v_mov_b32_e32 v0, s0
4281 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
4282 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4283 ; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc
4284 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4285 ; GCN1-NEXT: buffer_wbinvl1_vol
4286 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
4287 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
4288 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4289 ; GCN1-NEXT: s_endpgm
4291 ; GCN2-LABEL: atomic_load_f64_offset:
4292 ; GCN2: ; %bb.0: ; %entry
4293 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
4294 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
4295 ; GCN2-NEXT: s_add_u32 s0, s0, 32
4296 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
4297 ; GCN2-NEXT: v_mov_b32_e32 v0, s0
4298 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
4299 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4300 ; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc
4301 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4302 ; GCN2-NEXT: buffer_wbinvl1_vol
4303 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
4304 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
4305 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4306 ; GCN2-NEXT: s_endpgm
4308 %gep = getelementptr double, ptr %in, i64 4
4309 %val = load atomic double, ptr %gep seq_cst, align 8
4310 store double %val, ptr %out
; Base-address variant of the seq_cst f64 atomic load (agent scope, no offset):
; flat_load_dwordx2 glc + buffer_wbinvl1_vol, result stored to %out.
4314 define amdgpu_kernel void @atomic_load_f64(ptr %in, ptr %out) {
4315 ; GCN1-LABEL: atomic_load_f64:
4316 ; GCN1: ; %bb.0: ; %entry
4317 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
4318 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
4319 ; GCN1-NEXT: v_mov_b32_e32 v0, s0
4320 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
4321 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4322 ; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc
4323 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4324 ; GCN1-NEXT: buffer_wbinvl1_vol
4325 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
4326 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
4327 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4328 ; GCN1-NEXT: s_endpgm
4330 ; GCN2-LABEL: atomic_load_f64:
4331 ; GCN2: ; %bb.0: ; %entry
4332 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
4333 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
4334 ; GCN2-NEXT: v_mov_b32_e32 v0, s0
4335 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
4336 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4337 ; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc
4338 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4339 ; GCN2-NEXT: buffer_wbinvl1_vol
4340 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
4341 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
4342 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4343 ; GCN2-NEXT: s_endpgm
4345 %val = load atomic double, ptr %in syncscope("agent") seq_cst, align 8
4346 store double %val, ptr %out
; Runtime index plus constant offset for the seq_cst f64 load: index scaled by
; 8 and added with carry, then a second +32 add, before the glc load and
; invalidate.
4350 define amdgpu_kernel void @atomic_load_f64_addr64_offset(ptr %in, ptr %out, i64 %index) {
4351 ; GCN1-LABEL: atomic_load_f64_addr64_offset:
4352 ; GCN1: ; %bb.0: ; %entry
4353 ; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
4354 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
4355 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
4356 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
4357 ; GCN1-NEXT: s_add_u32 s0, s0, s4
4358 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
4359 ; GCN1-NEXT: s_add_u32 s0, s0, 32
4360 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
4361 ; GCN1-NEXT: v_mov_b32_e32 v0, s0
4362 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
4363 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4364 ; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc
4365 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4366 ; GCN1-NEXT: buffer_wbinvl1_vol
4367 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
4368 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
4369 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4370 ; GCN1-NEXT: s_endpgm
4372 ; GCN2-LABEL: atomic_load_f64_addr64_offset:
4373 ; GCN2: ; %bb.0: ; %entry
4374 ; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
4375 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
4376 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
4377 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
4378 ; GCN2-NEXT: s_add_u32 s0, s0, s4
4379 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
4380 ; GCN2-NEXT: s_add_u32 s0, s0, 32
4381 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
4382 ; GCN2-NEXT: v_mov_b32_e32 v0, s0
4383 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
4384 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4385 ; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc
4386 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4387 ; GCN2-NEXT: buffer_wbinvl1_vol
4388 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
4389 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
4390 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4391 ; GCN2-NEXT: s_endpgm
4393 %ptr = getelementptr double, ptr %in, i64 %index
4394 %gep = getelementptr double, ptr %ptr, i64 4
4395 %val = load atomic double, ptr %gep seq_cst, align 8
4396 store double %val, ptr %out
; Runtime-indexed seq_cst f64 load without the extra constant offset: one
; scaled 64-bit add, then flat_load_dwordx2 glc + buffer_wbinvl1_vol.
4400 define amdgpu_kernel void @atomic_load_f64_addr64(ptr %in, ptr %out, i64 %index) {
4401 ; GCN1-LABEL: atomic_load_f64_addr64:
4402 ; GCN1: ; %bb.0: ; %entry
4403 ; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
4404 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
4405 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
4406 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
4407 ; GCN1-NEXT: s_add_u32 s0, s0, s4
4408 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
4409 ; GCN1-NEXT: v_mov_b32_e32 v0, s0
4410 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
4411 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4412 ; GCN1-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc
4413 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4414 ; GCN1-NEXT: buffer_wbinvl1_vol
4415 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
4416 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
4417 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4418 ; GCN1-NEXT: s_endpgm
4420 ; GCN2-LABEL: atomic_load_f64_addr64:
4421 ; GCN2: ; %bb.0: ; %entry
4422 ; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
4423 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
4424 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
4425 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[4:5], 3
4426 ; GCN2-NEXT: s_add_u32 s0, s0, s4
4427 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
4428 ; GCN2-NEXT: v_mov_b32_e32 v0, s0
4429 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
4430 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4431 ; GCN2-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc
4432 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4433 ; GCN2-NEXT: buffer_wbinvl1_vol
4434 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
4435 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
4436 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4437 ; GCN2-NEXT: s_endpgm
4439 %ptr = getelementptr double, ptr %in, i64 %index
4440 %val = load atomic double, ptr %ptr seq_cst, align 8
4441 store double %val, ptr %out
; seq_cst atomic f64 store at +32 bytes: the offset is folded into the scalar
; address and the store is a flat_store_dwordx2 preceded by a full s_waitcnt
; (no glc / cache invalidate is emitted for the store side).
4445 define amdgpu_kernel void @atomic_store_f64_offset(double %in, ptr %out) {
4446 ; GCN1-LABEL: atomic_store_f64_offset:
4447 ; GCN1: ; %bb.0: ; %entry
4448 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
4449 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
4450 ; GCN1-NEXT: v_mov_b32_e32 v0, s0
4451 ; GCN1-NEXT: s_add_u32 s0, s2, 32
4452 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
4453 ; GCN1-NEXT: s_addc_u32 s1, s3, 0
4454 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
4455 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
4456 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4457 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4458 ; GCN1-NEXT: s_endpgm
4460 ; GCN2-LABEL: atomic_store_f64_offset:
4461 ; GCN2: ; %bb.0: ; %entry
4462 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
4463 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
4464 ; GCN2-NEXT: v_mov_b32_e32 v0, s0
4465 ; GCN2-NEXT: s_add_u32 s0, s2, 32
4466 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
4467 ; GCN2-NEXT: s_addc_u32 s1, s3, 0
4468 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
4469 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
4470 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4471 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4472 ; GCN2-NEXT: s_endpgm
4474 %gep = getelementptr double, ptr %out, i64 4
4475 store atomic double %in, ptr %gep seq_cst, align 8
; Base-address seq_cst atomic f64 store: s_waitcnt then flat_store_dwordx2.
4479 define amdgpu_kernel void @atomic_store_f64(double %in, ptr %out) {
4480 ; GCN1-LABEL: atomic_store_f64:
4481 ; GCN1: ; %bb.0: ; %entry
4482 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
4483 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
4484 ; GCN1-NEXT: v_mov_b32_e32 v0, s0
4485 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
4486 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
4487 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
4488 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4489 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4490 ; GCN1-NEXT: s_endpgm
4492 ; GCN2-LABEL: atomic_store_f64:
4493 ; GCN2: ; %bb.0: ; %entry
4494 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
4495 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
4496 ; GCN2-NEXT: v_mov_b32_e32 v0, s0
4497 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
4498 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
4499 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
4500 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4501 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4502 ; GCN2-NEXT: s_endpgm
4504 store atomic double %in, ptr %out seq_cst, align 8
; Runtime index plus +32 constant offset for the atomic f64 store: scaled
; 64-bit add followed by the constant add, then flat_store_dwordx2.
4508 define amdgpu_kernel void @atomic_store_f64_addr64_offset(double %in, ptr %out, i64 %index) {
4509 ; GCN1-LABEL: atomic_store_f64_addr64_offset:
4510 ; GCN1: ; %bb.0: ; %entry
4511 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
4512 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
4513 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
4514 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
4515 ; GCN1-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
4516 ; GCN1-NEXT: s_add_u32 s0, s6, s0
4517 ; GCN1-NEXT: s_addc_u32 s1, s7, s1
4518 ; GCN1-NEXT: s_add_u32 s0, s0, 32
4519 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
4520 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
4521 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
4522 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
4523 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4524 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4525 ; GCN1-NEXT: s_endpgm
4527 ; GCN2-LABEL: atomic_store_f64_addr64_offset:
4528 ; GCN2: ; %bb.0: ; %entry
4529 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
4530 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
4531 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
4532 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
4533 ; GCN2-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
4534 ; GCN2-NEXT: s_add_u32 s0, s6, s0
4535 ; GCN2-NEXT: s_addc_u32 s1, s7, s1
4536 ; GCN2-NEXT: s_add_u32 s0, s0, 32
4537 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
4538 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
4539 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
4540 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
4541 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4542 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4543 ; GCN2-NEXT: s_endpgm
4545 %ptr = getelementptr double, ptr %out, i64 %index
4546 %gep = getelementptr double, ptr %ptr, i64 4
4547 store atomic double %in, ptr %gep seq_cst, align 8
; Runtime-indexed atomic f64 store without the constant offset.
4551 define amdgpu_kernel void @atomic_store_f64_addr64(double %in, ptr %out, i64 %index) {
4552 ; GCN1-LABEL: atomic_store_f64_addr64:
4553 ; GCN1: ; %bb.0: ; %entry
4554 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
4555 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
4556 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
4557 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
4558 ; GCN1-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
4559 ; GCN1-NEXT: s_add_u32 s0, s6, s0
4560 ; GCN1-NEXT: s_addc_u32 s1, s7, s1
4561 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
4562 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
4563 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
4564 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4565 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4566 ; GCN1-NEXT: s_endpgm
4568 ; GCN2-LABEL: atomic_store_f64_addr64:
4569 ; GCN2: ; %bb.0: ; %entry
4570 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
4571 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
4572 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
4573 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
4574 ; GCN2-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
4575 ; GCN2-NEXT: s_add_u32 s0, s6, s0
4576 ; GCN2-NEXT: s_addc_u32 s1, s7, s1
4577 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
4578 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
4579 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
4580 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4581 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4582 ; GCN2-NEXT: s_endpgm
4584 %ptr = getelementptr double, ptr %out, i64 %index
4585 store atomic double %in, ptr %ptr seq_cst, align 8
; atomicrmw uinc_wrap on i64 at +32 bytes, result unused: selected to
; flat_atomic_inc_x2 without glc, with the usual waitcnt/wbinvl1_vol bracket.
4589 define amdgpu_kernel void @atomic_inc_i64_offset(ptr %out, i64 %in) {
4590 ; GCN1-LABEL: atomic_inc_i64_offset:
4591 ; GCN1: ; %bb.0: ; %entry
4592 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
4593 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
4594 ; GCN1-NEXT: s_add_u32 s0, s0, 32
4595 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
4596 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
4597 ; GCN1-NEXT: v_mov_b32_e32 v0, s2
4598 ; GCN1-NEXT: v_mov_b32_e32 v1, s3
4599 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
4600 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4601 ; GCN1-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1]
4602 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4603 ; GCN1-NEXT: buffer_wbinvl1_vol
4604 ; GCN1-NEXT: s_endpgm
4606 ; GCN2-LABEL: atomic_inc_i64_offset:
4607 ; GCN2: ; %bb.0: ; %entry
4608 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
4609 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
4610 ; GCN2-NEXT: s_add_u32 s0, s0, 32
4611 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
4612 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
4613 ; GCN2-NEXT: v_mov_b32_e32 v0, s2
4614 ; GCN2-NEXT: v_mov_b32_e32 v1, s3
4615 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
4616 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4617 ; GCN2-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1]
4618 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4619 ; GCN2-NEXT: buffer_wbinvl1_vol
4620 ; GCN2-NEXT: s_endpgm
4622 %gep = getelementptr i64, ptr %out, i64 4
4623 %tmp0 = atomicrmw volatile uinc_wrap ptr %gep, i64 %in syncscope("agent") seq_cst
; Returned variant of uinc_wrap at +32: flat_atomic_inc_x2 with glc, old value
; stored to %out2 after the invalidate.
4627 define amdgpu_kernel void @atomic_inc_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
4628 ; GCN1-LABEL: atomic_inc_i64_ret_offset:
4629 ; GCN1: ; %bb.0: ; %entry
4630 ; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
4631 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
4632 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
4633 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
4634 ; GCN1-NEXT: s_add_u32 s0, s0, 32
4635 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
4636 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
4637 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
4638 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
4639 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4640 ; GCN1-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
4641 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4642 ; GCN1-NEXT: buffer_wbinvl1_vol
4643 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
4644 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
4645 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4646 ; GCN1-NEXT: s_endpgm
4648 ; GCN2-LABEL: atomic_inc_i64_ret_offset:
4649 ; GCN2: ; %bb.0: ; %entry
4650 ; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
4651 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
4652 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
4653 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
4654 ; GCN2-NEXT: s_add_u32 s0, s0, 32
4655 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
4656 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
4657 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
4658 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
4659 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4660 ; GCN2-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
4661 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4662 ; GCN2-NEXT: buffer_wbinvl1_vol
4663 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
4664 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
4665 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4666 ; GCN2-NEXT: s_endpgm
4668 %gep = getelementptr i64, ptr %out, i64 4
4669 %tmp0 = atomicrmw volatile uinc_wrap ptr %gep, i64 %in syncscope("agent") seq_cst
4670 store i64 %tmp0, ptr %out2
; uinc_wrap with runtime index and +32 constant offset, result unused:
; scaled 64-bit address math, then flat_atomic_inc_x2 without glc.
4674 define amdgpu_kernel void @atomic_inc_i64_incr64_offset(ptr %out, i64 %in, i64 %index) {
4675 ; GCN1-LABEL: atomic_inc_i64_incr64_offset:
4676 ; GCN1: ; %bb.0: ; %entry
4677 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
4678 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
4679 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
4680 ; GCN1-NEXT: v_mov_b32_e32 v0, s6
4681 ; GCN1-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
4682 ; GCN1-NEXT: s_add_u32 s0, s4, s0
4683 ; GCN1-NEXT: s_addc_u32 s1, s5, s1
4684 ; GCN1-NEXT: s_add_u32 s0, s0, 32
4685 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
4686 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
4687 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
4688 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
4689 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4690 ; GCN1-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1]
4691 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4692 ; GCN1-NEXT: buffer_wbinvl1_vol
4693 ; GCN1-NEXT: s_endpgm
4695 ; GCN2-LABEL: atomic_inc_i64_incr64_offset:
4696 ; GCN2: ; %bb.0: ; %entry
4697 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
4698 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
4699 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
4700 ; GCN2-NEXT: v_mov_b32_e32 v0, s6
4701 ; GCN2-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
4702 ; GCN2-NEXT: s_add_u32 s0, s4, s0
4703 ; GCN2-NEXT: s_addc_u32 s1, s5, s1
4704 ; GCN2-NEXT: s_add_u32 s0, s0, 32
4705 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
4706 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
4707 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
4708 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
4709 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4710 ; GCN2-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1]
4711 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4712 ; GCN2-NEXT: buffer_wbinvl1_vol
4713 ; GCN2-NEXT: s_endpgm
4715 %ptr = getelementptr i64, ptr %out, i64 %index
4716 %gep = getelementptr i64, ptr %ptr, i64 4
4717 %tmp0 = atomicrmw volatile uinc_wrap ptr %gep, i64 %in syncscope("agent") seq_cst
; Returned uinc_wrap with runtime index and +32 offset: glc inc, old value
; stored to %out2.
4721 define amdgpu_kernel void @atomic_inc_i64_ret_incr64_offset(ptr %out, ptr %out2, i64 %in, i64 %index) {
4722 ; GCN1-LABEL: atomic_inc_i64_ret_incr64_offset:
4723 ; GCN1: ; %bb.0: ; %entry
4724 ; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
4725 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
4726 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
4727 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
4728 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
4729 ; GCN1-NEXT: s_add_u32 s0, s0, s4
4730 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
4731 ; GCN1-NEXT: s_add_u32 s0, s0, 32
4732 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
4733 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
4734 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
4735 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4736 ; GCN1-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
4737 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4738 ; GCN1-NEXT: buffer_wbinvl1_vol
4739 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
4740 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
4741 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4742 ; GCN1-NEXT: s_endpgm
4744 ; GCN2-LABEL: atomic_inc_i64_ret_incr64_offset:
4745 ; GCN2: ; %bb.0: ; %entry
4746 ; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
4747 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
4748 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
4749 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
4750 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
4751 ; GCN2-NEXT: s_add_u32 s0, s0, s4
4752 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
4753 ; GCN2-NEXT: s_add_u32 s0, s0, 32
4754 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
4755 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
4756 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
4757 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4758 ; GCN2-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
4759 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4760 ; GCN2-NEXT: buffer_wbinvl1_vol
4761 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
4762 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
4763 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4764 ; GCN2-NEXT: s_endpgm
4766 %ptr = getelementptr i64, ptr %out, i64 %index
4767 %gep = getelementptr i64, ptr %ptr, i64 4
4768 %tmp0 = atomicrmw volatile uinc_wrap ptr %gep, i64 %in syncscope("agent") seq_cst
4769 store i64 %tmp0, ptr %out2
; Base-address uinc_wrap, result unused: flat_atomic_inc_x2 without glc.
4773 define amdgpu_kernel void @atomic_inc_i64(ptr %out, i64 %in) {
4774 ; GCN1-LABEL: atomic_inc_i64:
4775 ; GCN1: ; %bb.0: ; %entry
4776 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
4777 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
4778 ; GCN1-NEXT: v_mov_b32_e32 v0, s0
4779 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
4780 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
4781 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
4782 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4783 ; GCN1-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3]
4784 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4785 ; GCN1-NEXT: buffer_wbinvl1_vol
4786 ; GCN1-NEXT: s_endpgm
4788 ; GCN2-LABEL: atomic_inc_i64:
4789 ; GCN2: ; %bb.0: ; %entry
4790 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
4791 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
4792 ; GCN2-NEXT: v_mov_b32_e32 v0, s0
4793 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
4794 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
4795 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
4796 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4797 ; GCN2-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3]
4798 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4799 ; GCN2-NEXT: buffer_wbinvl1_vol
4800 ; GCN2-NEXT: s_endpgm
4802 %tmp0 = atomicrmw volatile uinc_wrap ptr %out, i64 %in syncscope("agent") seq_cst
; Base-address uinc_wrap with returned old value: glc inc, then store to %out2.
4806 define amdgpu_kernel void @atomic_inc_i64_ret(ptr %out, ptr %out2, i64 %in) {
4807 ; GCN1-LABEL: atomic_inc_i64_ret:
4808 ; GCN1: ; %bb.0: ; %entry
4809 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
4810 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
4811 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
4812 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
4813 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
4814 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
4815 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
4816 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4817 ; GCN1-NEXT: flat_atomic_inc_x2 v[0:1], v[0:1], v[2:3] glc
4818 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4819 ; GCN1-NEXT: buffer_wbinvl1_vol
4820 ; GCN1-NEXT: v_mov_b32_e32 v2, s6
4821 ; GCN1-NEXT: v_mov_b32_e32 v3, s7
4822 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4823 ; GCN1-NEXT: s_endpgm
4825 ; GCN2-LABEL: atomic_inc_i64_ret:
4826 ; GCN2: ; %bb.0: ; %entry
4827 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
4828 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
4829 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
4830 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
4831 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
4832 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
4833 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
4834 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4835 ; GCN2-NEXT: flat_atomic_inc_x2 v[0:1], v[0:1], v[2:3] glc
4836 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4837 ; GCN2-NEXT: buffer_wbinvl1_vol
4838 ; GCN2-NEXT: v_mov_b32_e32 v2, s6
4839 ; GCN2-NEXT: v_mov_b32_e32 v3, s7
4840 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4841 ; GCN2-NEXT: s_endpgm
4843 %tmp0 = atomicrmw volatile uinc_wrap ptr %out, i64 %in syncscope("agent") seq_cst
4844 store i64 %tmp0, ptr %out2
; Runtime-indexed uinc_wrap, result unused: scaled 64-bit address add then
; flat_atomic_inc_x2 without glc.
4848 define amdgpu_kernel void @atomic_inc_i64_incr64(ptr %out, i64 %in, i64 %index) {
4849 ; GCN1-LABEL: atomic_inc_i64_incr64:
4850 ; GCN1: ; %bb.0: ; %entry
4851 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
4852 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
4853 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
4854 ; GCN1-NEXT: v_mov_b32_e32 v0, s6
4855 ; GCN1-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
4856 ; GCN1-NEXT: s_add_u32 s0, s4, s0
4857 ; GCN1-NEXT: s_addc_u32 s1, s5, s1
4858 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
4859 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
4860 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
4861 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4862 ; GCN1-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1]
4863 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4864 ; GCN1-NEXT: buffer_wbinvl1_vol
4865 ; GCN1-NEXT: s_endpgm
4867 ; GCN2-LABEL: atomic_inc_i64_incr64:
4868 ; GCN2: ; %bb.0: ; %entry
4869 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
4870 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
4871 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
4872 ; GCN2-NEXT: v_mov_b32_e32 v0, s6
4873 ; GCN2-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
4874 ; GCN2-NEXT: s_add_u32 s0, s4, s0
4875 ; GCN2-NEXT: s_addc_u32 s1, s5, s1
4876 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
4877 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
4878 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
4879 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4880 ; GCN2-NEXT: flat_atomic_inc_x2 v[2:3], v[0:1]
4881 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4882 ; GCN2-NEXT: buffer_wbinvl1_vol
4883 ; GCN2-NEXT: s_endpgm
4885 %ptr = getelementptr i64, ptr %out, i64 %index
4886 %tmp0 = atomicrmw volatile uinc_wrap ptr %ptr, i64 %in syncscope("agent") seq_cst
; Runtime-indexed uinc_wrap with returned old value: glc inc plus store to
; %out2.
4890 define amdgpu_kernel void @atomic_inc_i64_ret_incr64(ptr %out, ptr %out2, i64 %in, i64 %index) {
4891 ; GCN1-LABEL: atomic_inc_i64_ret_incr64:
4892 ; GCN1: ; %bb.0: ; %entry
4893 ; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
4894 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
4895 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
4896 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
4897 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
4898 ; GCN1-NEXT: s_add_u32 s0, s0, s4
4899 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
4900 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
4901 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
4902 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4903 ; GCN1-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
4904 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4905 ; GCN1-NEXT: buffer_wbinvl1_vol
4906 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
4907 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
4908 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4909 ; GCN1-NEXT: s_endpgm
4911 ; GCN2-LABEL: atomic_inc_i64_ret_incr64:
4912 ; GCN2: ; %bb.0: ; %entry
4913 ; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
4914 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
4915 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
4916 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
4917 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
4918 ; GCN2-NEXT: s_add_u32 s0, s0, s4
4919 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
4920 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
4921 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
4922 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4923 ; GCN2-NEXT: flat_atomic_inc_x2 v[0:1], v[2:3], v[0:1] glc
4924 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4925 ; GCN2-NEXT: buffer_wbinvl1_vol
4926 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
4927 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
4928 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4929 ; GCN2-NEXT: s_endpgm
4931 %ptr = getelementptr i64, ptr %out, i64 %index
4932 %tmp0 = atomicrmw volatile uinc_wrap ptr %ptr, i64 %in syncscope("agent") seq_cst
4933 store i64 %tmp0, ptr %out2
; atomicrmw udec_wrap on i64 at +32 bytes, result unused: selected to
; flat_atomic_dec_x2 without glc, mirroring the uinc_wrap cases above.
4937 define amdgpu_kernel void @atomic_dec_i64_offset(ptr %out, i64 %in) {
4938 ; GCN1-LABEL: atomic_dec_i64_offset:
4939 ; GCN1: ; %bb.0: ; %entry
4940 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
4941 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
4942 ; GCN1-NEXT: s_add_u32 s0, s0, 32
4943 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
4944 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
4945 ; GCN1-NEXT: v_mov_b32_e32 v0, s2
4946 ; GCN1-NEXT: v_mov_b32_e32 v1, s3
4947 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
4948 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4949 ; GCN1-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1]
4950 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4951 ; GCN1-NEXT: buffer_wbinvl1_vol
4952 ; GCN1-NEXT: s_endpgm
4954 ; GCN2-LABEL: atomic_dec_i64_offset:
4955 ; GCN2: ; %bb.0: ; %entry
4956 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
4957 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
4958 ; GCN2-NEXT: s_add_u32 s0, s0, 32
4959 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
4960 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
4961 ; GCN2-NEXT: v_mov_b32_e32 v0, s2
4962 ; GCN2-NEXT: v_mov_b32_e32 v1, s3
4963 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
4964 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4965 ; GCN2-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1]
4966 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4967 ; GCN2-NEXT: buffer_wbinvl1_vol
4968 ; GCN2-NEXT: s_endpgm
4970 %gep = getelementptr i64, ptr %out, i64 4
4971 %tmp0 = atomicrmw volatile udec_wrap ptr %gep, i64 %in syncscope("agent") seq_cst
; Returning variant of the offset i64 udec_wrap: the atomic's old value is used
; (stored to %out2), so codegen must select the glc form of flat_atomic_dec_x2,
; which writes the loaded value back into v[0:1] for the trailing
; flat_store_dwordx2. Constant +32 offset is folded via s_add_u32/s_addc_u32.
; CHECK lines are autogenerated by update_llc_test_checks.py; regenerate rather
; than hand-editing them.
4975 define amdgpu_kernel void @atomic_dec_i64_ret_offset(ptr %out, ptr %out2, i64 %in) {
4976 ; GCN1-LABEL: atomic_dec_i64_ret_offset:
4977 ; GCN1: ; %bb.0: ; %entry
4978 ; GCN1-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xd
4979 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
4980 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
4981 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
4982 ; GCN1-NEXT: s_add_u32 s0, s0, 32
4983 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
4984 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
4985 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
4986 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
4987 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4988 ; GCN1-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
4989 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
4990 ; GCN1-NEXT: buffer_wbinvl1_vol
4991 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
4992 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
4993 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
4994 ; GCN1-NEXT: s_endpgm
4996 ; GCN2-LABEL: atomic_dec_i64_ret_offset:
4997 ; GCN2: ; %bb.0: ; %entry
4998 ; GCN2-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x34
4999 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
5000 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
5001 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
5002 ; GCN2-NEXT: s_add_u32 s0, s0, 32
5003 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
5004 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
5005 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
5006 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
5007 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5008 ; GCN2-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
5009 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5010 ; GCN2-NEXT: buffer_wbinvl1_vol
5011 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
5012 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
5013 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
5014 ; GCN2-NEXT: s_endpgm
5016 %gep = getelementptr i64, ptr %out, i64 4
5017 %tmp0 = atomicrmw volatile udec_wrap ptr %gep, i64 %in syncscope("agent") seq_cst
5018 store i64 %tmp0, ptr %out2
; i64 udec_wrap at a runtime-indexed address plus a constant offset:
; %out + %index*8 + 32. The *8 scaling shows up as s_lshl_b64 ..., 3; the index
; add and the +32 add are two separate s_add_u32/s_addc_u32 pairs. Result is
; unused, so the non-glc flat_atomic_dec_x2 is expected.
; CHECK lines are autogenerated by update_llc_test_checks.py; regenerate rather
; than hand-editing them.
5022 define amdgpu_kernel void @atomic_dec_i64_decr64_offset(ptr %out, i64 %in, i64 %index) {
5023 ; GCN1-LABEL: atomic_dec_i64_decr64_offset:
5024 ; GCN1: ; %bb.0: ; %entry
5025 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
5026 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
5027 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
5028 ; GCN1-NEXT: v_mov_b32_e32 v0, s6
5029 ; GCN1-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
5030 ; GCN1-NEXT: s_add_u32 s0, s4, s0
5031 ; GCN1-NEXT: s_addc_u32 s1, s5, s1
5032 ; GCN1-NEXT: s_add_u32 s0, s0, 32
5033 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
5034 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
5035 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
5036 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
5037 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5038 ; GCN1-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1]
5039 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5040 ; GCN1-NEXT: buffer_wbinvl1_vol
5041 ; GCN1-NEXT: s_endpgm
5043 ; GCN2-LABEL: atomic_dec_i64_decr64_offset:
5044 ; GCN2: ; %bb.0: ; %entry
5045 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
5046 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
5047 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
5048 ; GCN2-NEXT: v_mov_b32_e32 v0, s6
5049 ; GCN2-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
5050 ; GCN2-NEXT: s_add_u32 s0, s4, s0
5051 ; GCN2-NEXT: s_addc_u32 s1, s5, s1
5052 ; GCN2-NEXT: s_add_u32 s0, s0, 32
5053 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
5054 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
5055 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
5056 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
5057 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5058 ; GCN2-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1]
5059 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5060 ; GCN2-NEXT: buffer_wbinvl1_vol
5061 ; GCN2-NEXT: s_endpgm
5063 %ptr = getelementptr i64, ptr %out, i64 %index
5064 %gep = getelementptr i64, ptr %ptr, i64 4
5065 %tmp0 = atomicrmw volatile udec_wrap ptr %gep, i64 %in syncscope("agent") seq_cst
; Returning i64 udec_wrap at %out + %index*8 + 32: combines the runtime index
; (s_lshl_b64 by 3, i.e. *8 bytes) with the constant +32, and because the old
; value is stored to %out2, the glc form of flat_atomic_dec_x2 is used with the
; result landing in v[0:1] for the trailing flat_store_dwordx2. With four i64
; kernel args, all kernargs come in via one s_load_dwordx8.
; CHECK lines are autogenerated by update_llc_test_checks.py; regenerate rather
; than hand-editing them.
5069 define amdgpu_kernel void @atomic_dec_i64_ret_decr64_offset(ptr %out, ptr %out2, i64 %in, i64 %index) {
5070 ; GCN1-LABEL: atomic_dec_i64_ret_decr64_offset:
5071 ; GCN1: ; %bb.0: ; %entry
5072 ; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
5073 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
5074 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
5075 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
5076 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
5077 ; GCN1-NEXT: s_add_u32 s0, s0, s4
5078 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
5079 ; GCN1-NEXT: s_add_u32 s0, s0, 32
5080 ; GCN1-NEXT: s_addc_u32 s1, s1, 0
5081 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
5082 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
5083 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5084 ; GCN1-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
5085 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5086 ; GCN1-NEXT: buffer_wbinvl1_vol
5087 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
5088 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
5089 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
5090 ; GCN1-NEXT: s_endpgm
5092 ; GCN2-LABEL: atomic_dec_i64_ret_decr64_offset:
5093 ; GCN2: ; %bb.0: ; %entry
5094 ; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
5095 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
5096 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
5097 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
5098 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
5099 ; GCN2-NEXT: s_add_u32 s0, s0, s4
5100 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
5101 ; GCN2-NEXT: s_add_u32 s0, s0, 32
5102 ; GCN2-NEXT: s_addc_u32 s1, s1, 0
5103 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
5104 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
5105 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5106 ; GCN2-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
5107 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5108 ; GCN2-NEXT: buffer_wbinvl1_vol
5109 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
5110 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
5111 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
5112 ; GCN2-NEXT: s_endpgm
5114 %ptr = getelementptr i64, ptr %out, i64 %index
5115 %gep = getelementptr i64, ptr %ptr, i64 4
5116 %tmp0 = atomicrmw volatile udec_wrap ptr %gep, i64 %in syncscope("agent") seq_cst
5117 store i64 %tmp0, ptr %out2
; Baseline case: i64 udec_wrap directly on %out with no offset arithmetic —
; just kernarg loads, SGPR->VGPR copies, and the non-glc flat_atomic_dec_x2
; (result unused). agent-scope seq_cst adds the surrounding s_waitcnt and
; buffer_wbinvl1_vol. GCN1/GCN2 differ only in the kernarg offset (0x9/0x24).
; CHECK lines are autogenerated by update_llc_test_checks.py; regenerate rather
; than hand-editing them.
5121 define amdgpu_kernel void @atomic_dec_i64(ptr %out, i64 %in) {
5122 ; GCN1-LABEL: atomic_dec_i64:
5123 ; GCN1: ; %bb.0: ; %entry
5124 ; GCN1-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x9
5125 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
5126 ; GCN1-NEXT: v_mov_b32_e32 v0, s0
5127 ; GCN1-NEXT: v_mov_b32_e32 v1, s1
5128 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
5129 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
5130 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5131 ; GCN1-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3]
5132 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5133 ; GCN1-NEXT: buffer_wbinvl1_vol
5134 ; GCN1-NEXT: s_endpgm
5136 ; GCN2-LABEL: atomic_dec_i64:
5137 ; GCN2: ; %bb.0: ; %entry
5138 ; GCN2-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x24
5139 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
5140 ; GCN2-NEXT: v_mov_b32_e32 v0, s0
5141 ; GCN2-NEXT: v_mov_b32_e32 v1, s1
5142 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
5143 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
5144 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5145 ; GCN2-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3]
5146 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5147 ; GCN2-NEXT: buffer_wbinvl1_vol
5148 ; GCN2-NEXT: s_endpgm
5150 %tmp0 = atomicrmw volatile udec_wrap ptr %out, i64 %in syncscope("agent") seq_cst
; Returning baseline: i64 udec_wrap on %out with the old value stored to %out2.
; The used result forces the glc form of flat_atomic_dec_x2 (old value returned
; in v[0:1]), followed by flat_store_dwordx2 to the second pointer.
; CHECK lines are autogenerated by update_llc_test_checks.py; regenerate rather
; than hand-editing them.
5154 define amdgpu_kernel void @atomic_dec_i64_ret(ptr %out, ptr %out2, i64 %in) {
5155 ; GCN1-LABEL: atomic_dec_i64_ret:
5156 ; GCN1: ; %bb.0: ; %entry
5157 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
5158 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
5159 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
5160 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
5161 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
5162 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
5163 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
5164 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5165 ; GCN1-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
5166 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5167 ; GCN1-NEXT: buffer_wbinvl1_vol
5168 ; GCN1-NEXT: v_mov_b32_e32 v2, s6
5169 ; GCN1-NEXT: v_mov_b32_e32 v3, s7
5170 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
5171 ; GCN1-NEXT: s_endpgm
5173 ; GCN2-LABEL: atomic_dec_i64_ret:
5174 ; GCN2: ; %bb.0: ; %entry
5175 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
5176 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
5177 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
5178 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
5179 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
5180 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
5181 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
5182 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5183 ; GCN2-NEXT: flat_atomic_dec_x2 v[0:1], v[0:1], v[2:3] glc
5184 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5185 ; GCN2-NEXT: buffer_wbinvl1_vol
5186 ; GCN2-NEXT: v_mov_b32_e32 v2, s6
5187 ; GCN2-NEXT: v_mov_b32_e32 v3, s7
5188 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
5189 ; GCN2-NEXT: s_endpgm
5191 %tmp0 = atomicrmw volatile udec_wrap ptr %out, i64 %in syncscope("agent") seq_cst
5192 store i64 %tmp0, ptr %out2
; i64 udec_wrap at a runtime-indexed address %out + %index*8 (no constant
; offset): one s_lshl_b64 by 3 for the element-size scaling and a single
; s_add_u32/s_addc_u32 pair. Result unused, so the non-glc flat_atomic_dec_x2
; is expected.
; CHECK lines are autogenerated by update_llc_test_checks.py; regenerate rather
; than hand-editing them.
5196 define amdgpu_kernel void @atomic_dec_i64_decr64(ptr %out, i64 %in, i64 %index) {
5197 ; GCN1-LABEL: atomic_dec_i64_decr64:
5198 ; GCN1: ; %bb.0: ; %entry
5199 ; GCN1-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x9
5200 ; GCN1-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xd
5201 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
5202 ; GCN1-NEXT: v_mov_b32_e32 v0, s6
5203 ; GCN1-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
5204 ; GCN1-NEXT: s_add_u32 s0, s4, s0
5205 ; GCN1-NEXT: s_addc_u32 s1, s5, s1
5206 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
5207 ; GCN1-NEXT: v_mov_b32_e32 v1, s7
5208 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
5209 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5210 ; GCN1-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1]
5211 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5212 ; GCN1-NEXT: buffer_wbinvl1_vol
5213 ; GCN1-NEXT: s_endpgm
5215 ; GCN2-LABEL: atomic_dec_i64_decr64:
5216 ; GCN2: ; %bb.0: ; %entry
5217 ; GCN2-NEXT: s_load_dwordx4 s[4:7], s[0:1], 0x24
5218 ; GCN2-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x34
5219 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
5220 ; GCN2-NEXT: v_mov_b32_e32 v0, s6
5221 ; GCN2-NEXT: s_lshl_b64 s[0:1], s[0:1], 3
5222 ; GCN2-NEXT: s_add_u32 s0, s4, s0
5223 ; GCN2-NEXT: s_addc_u32 s1, s5, s1
5224 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
5225 ; GCN2-NEXT: v_mov_b32_e32 v1, s7
5226 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
5227 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5228 ; GCN2-NEXT: flat_atomic_dec_x2 v[2:3], v[0:1]
5229 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5230 ; GCN2-NEXT: buffer_wbinvl1_vol
5231 ; GCN2-NEXT: s_endpgm
5233 %ptr = getelementptr i64, ptr %out, i64 %index
5234 %tmp0 = atomicrmw volatile udec_wrap ptr %ptr, i64 %in syncscope("agent") seq_cst
; Returning i64 udec_wrap at %out + %index*8: runtime index scaled via
; s_lshl_b64 by 3, and because the old value is stored to %out2 the glc form of
; flat_atomic_dec_x2 returns it in v[0:1] for the trailing flat_store_dwordx2.
; Four i64 kernargs are fetched with a single s_load_dwordx8.
; CHECK lines are autogenerated by update_llc_test_checks.py; regenerate rather
; than hand-editing them.
5238 define amdgpu_kernel void @atomic_dec_i64_ret_decr64(ptr %out, ptr %out2, i64 %in, i64 %index) {
5239 ; GCN1-LABEL: atomic_dec_i64_ret_decr64:
5240 ; GCN1: ; %bb.0: ; %entry
5241 ; GCN1-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x9
5242 ; GCN1-NEXT: s_waitcnt lgkmcnt(0)
5243 ; GCN1-NEXT: v_mov_b32_e32 v0, s4
5244 ; GCN1-NEXT: v_mov_b32_e32 v1, s5
5245 ; GCN1-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
5246 ; GCN1-NEXT: s_add_u32 s0, s0, s4
5247 ; GCN1-NEXT: s_addc_u32 s1, s1, s5
5248 ; GCN1-NEXT: v_mov_b32_e32 v3, s1
5249 ; GCN1-NEXT: v_mov_b32_e32 v2, s0
5250 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5251 ; GCN1-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
5252 ; GCN1-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5253 ; GCN1-NEXT: buffer_wbinvl1_vol
5254 ; GCN1-NEXT: v_mov_b32_e32 v2, s2
5255 ; GCN1-NEXT: v_mov_b32_e32 v3, s3
5256 ; GCN1-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
5257 ; GCN1-NEXT: s_endpgm
5259 ; GCN2-LABEL: atomic_dec_i64_ret_decr64:
5260 ; GCN2: ; %bb.0: ; %entry
5261 ; GCN2-NEXT: s_load_dwordx8 s[0:7], s[0:1], 0x24
5262 ; GCN2-NEXT: s_waitcnt lgkmcnt(0)
5263 ; GCN2-NEXT: v_mov_b32_e32 v0, s4
5264 ; GCN2-NEXT: v_mov_b32_e32 v1, s5
5265 ; GCN2-NEXT: s_lshl_b64 s[4:5], s[6:7], 3
5266 ; GCN2-NEXT: s_add_u32 s0, s0, s4
5267 ; GCN2-NEXT: s_addc_u32 s1, s1, s5
5268 ; GCN2-NEXT: v_mov_b32_e32 v3, s1
5269 ; GCN2-NEXT: v_mov_b32_e32 v2, s0
5270 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5271 ; GCN2-NEXT: flat_atomic_dec_x2 v[0:1], v[2:3], v[0:1] glc
5272 ; GCN2-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
5273 ; GCN2-NEXT: buffer_wbinvl1_vol
5274 ; GCN2-NEXT: v_mov_b32_e32 v2, s2
5275 ; GCN2-NEXT: v_mov_b32_e32 v3, s3
5276 ; GCN2-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
5277 ; GCN2-NEXT: s_endpgm
5279 %ptr = getelementptr i64, ptr %out, i64 %index
5280 %tmp0 = atomicrmw volatile udec_wrap ptr %ptr, i64 %in syncscope("agent") seq_cst
5281 store i64 %tmp0, ptr %out2