// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: amdgpu-registered-target
// RUN: %clang_cc1 %s -x hip -fcuda-is-device -emit-llvm -O0 -o - \
// RUN:   -triple=amdgcn-amd-amdhsa | FileCheck %s

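// The __builtin_amdgcn_atomic_inc{32,64} / __builtin_amdgcn_atomic_dec{32,64}
// builtins take a pointer, an operand value, a C11 memory-order constant, and a
// synchronization-scope string; each call is expected to lower to a single
// atomicrmw uinc_wrap / udec_wrap instruction.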
// CHECK-LABEL: @_Z29test_non_volatile_parameter32Pj(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[RES:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT:    [[PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[PTR_ADDR]] to ptr
// CHECK-NEXT:    [[RES_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RES]] to ptr
// CHECK-NEXT:    store ptr [[PTR:%.*]], ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw uinc_wrap ptr [[TMP0]], i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP3]], ptr [[RES_ASCAST]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP5:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr [[TMP5]], align 4
// CHECK-NEXT:    [[TMP7:%.*]] = atomicrmw udec_wrap ptr [[TMP4]], i32 [[TMP6]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP7]], ptr [[RES_ASCAST]], align 4
// CHECK-NEXT:    ret void
//
__attribute__((device)) void test_non_volatile_parameter32(__UINT32_TYPE__ *ptr) {
  __UINT32_TYPE__ res;
  res = __builtin_amdgcn_atomic_inc32(ptr, *ptr, __ATOMIC_SEQ_CST, "workgroup");

  res = __builtin_amdgcn_atomic_dec32(ptr, *ptr, __ATOMIC_SEQ_CST, "workgroup");
}

// CHECK-LABEL: @_Z29test_non_volatile_parameter64Py(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[RES:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT:    [[PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[PTR_ADDR]] to ptr
// CHECK-NEXT:    [[RES_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RES]] to ptr
// CHECK-NEXT:    store ptr [[PTR:%.*]], ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr [[TMP1]], align 8
// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw uinc_wrap ptr [[TMP0]], i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT:    store i64 [[TMP3]], ptr [[RES_ASCAST]], align 8
// CHECK-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP5:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP6:%.*]] = load i64, ptr [[TMP5]], align 8
// CHECK-NEXT:    [[TMP7:%.*]] = atomicrmw udec_wrap ptr [[TMP4]], i64 [[TMP6]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT:    store i64 [[TMP7]], ptr [[RES_ASCAST]], align 8
// CHECK-NEXT:    ret void
//
__attribute__((device)) void test_non_volatile_parameter64(__UINT64_TYPE__ *ptr) {
  __UINT64_TYPE__ res;
  res = __builtin_amdgcn_atomic_inc64(ptr, *ptr, __ATOMIC_SEQ_CST, "workgroup");

  res = __builtin_amdgcn_atomic_dec64(ptr, *ptr, __ATOMIC_SEQ_CST, "workgroup");
}

// CHECK-LABEL: @_Z25test_volatile_parameter32PVj(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[RES:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT:    [[PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[PTR_ADDR]] to ptr
// CHECK-NEXT:    [[RES_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RES]] to ptr
// CHECK-NEXT:    store ptr [[PTR:%.*]], ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP2:%.*]] = load volatile i32, ptr [[TMP1]], align 4
// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw volatile uinc_wrap ptr [[TMP0]], i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP3]], ptr [[RES_ASCAST]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP5:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP6:%.*]] = load volatile i32, ptr [[TMP5]], align 4
// CHECK-NEXT:    [[TMP7:%.*]] = atomicrmw volatile udec_wrap ptr [[TMP4]], i32 [[TMP6]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP7]], ptr [[RES_ASCAST]], align 4
// CHECK-NEXT:    ret void
//
__attribute__((device)) void test_volatile_parameter32(volatile __UINT32_TYPE__ *ptr) {
  __UINT32_TYPE__ res;
  res = __builtin_amdgcn_atomic_inc32(ptr, *ptr, __ATOMIC_SEQ_CST, "workgroup");

  res = __builtin_amdgcn_atomic_dec32(ptr, *ptr, __ATOMIC_SEQ_CST, "workgroup");
}

// CHECK-LABEL: @_Z25test_volatile_parameter64PVy(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[PTR_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[RES:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT:    [[PTR_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[PTR_ADDR]] to ptr
// CHECK-NEXT:    [[RES_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RES]] to ptr
// CHECK-NEXT:    store ptr [[PTR:%.*]], ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP2:%.*]] = load volatile i64, ptr [[TMP1]], align 8
// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw volatile uinc_wrap ptr [[TMP0]], i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT:    store i64 [[TMP3]], ptr [[RES_ASCAST]], align 8
// CHECK-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP5:%.*]] = load ptr, ptr [[PTR_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP6:%.*]] = load volatile i64, ptr [[TMP5]], align 8
// CHECK-NEXT:    [[TMP7:%.*]] = atomicrmw volatile udec_wrap ptr [[TMP4]], i64 [[TMP6]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT:    store i64 [[TMP7]], ptr [[RES_ASCAST]], align 8
// CHECK-NEXT:    ret void
//
__attribute__((device)) void test_volatile_parameter64(volatile __UINT64_TYPE__ *ptr) {
  __UINT64_TYPE__ res;
  res = __builtin_amdgcn_atomic_inc64(ptr, *ptr, __ATOMIC_SEQ_CST, "workgroup");

  res = __builtin_amdgcn_atomic_dec64(ptr, *ptr, __ATOMIC_SEQ_CST, "workgroup");
}

// CHECK-LABEL: @_Z13test_shared32v(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), align 4
// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), i32 [[TMP0]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), align 4
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), align 4
// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared32vE3val to ptr), align 4
// CHECK-NEXT:    ret void
//
__attribute__((device)) void test_shared32() {
  __attribute__((shared)) __UINT32_TYPE__ val;

  val = __builtin_amdgcn_atomic_inc32(&val, val, __ATOMIC_SEQ_CST, "workgroup");

  val = __builtin_amdgcn_atomic_dec32(&val, val, __ATOMIC_SEQ_CST, "workgroup");
}

// CHECK-LABEL: @_Z13test_shared64v(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), align 8
// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), i64 [[TMP0]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT:    store i64 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), align 8
// CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), align 8
// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT:    store i64 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ13test_shared64vE3val to ptr), align 8
// CHECK-NEXT:    ret void
//
__attribute__((device)) void test_shared64() {
  __attribute__((shared)) __UINT64_TYPE__ val;

  val = __builtin_amdgcn_atomic_inc64(&val, val, __ATOMIC_SEQ_CST, "workgroup");

  val = __builtin_amdgcn_atomic_dec64(&val, val, __ATOMIC_SEQ_CST, "workgroup");
}

__attribute__((device)) __UINT32_TYPE__ global_val32;
// CHECK-LABEL: @_Z13test_global32v(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), align 4
// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), i32 [[TMP0]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP1]], ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), align 4
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), align 4
// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP3]], ptr addrspacecast (ptr addrspace(1) @global_val32 to ptr), align 4
// CHECK-NEXT:    ret void
//
__attribute__((device)) void test_global32() {
  global_val32 = __builtin_amdgcn_atomic_inc32(&global_val32, global_val32, __ATOMIC_SEQ_CST, "workgroup");

  global_val32 = __builtin_amdgcn_atomic_dec32(&global_val32, global_val32, __ATOMIC_SEQ_CST, "workgroup");
}

__attribute__((device)) __UINT64_TYPE__ global_val64;
// CHECK-LABEL: @_Z13test_global64v(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), align 8
// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), i64 [[TMP0]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT:    store i64 [[TMP1]], ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), align 8
// CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), align 8
// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT:    store i64 [[TMP3]], ptr addrspacecast (ptr addrspace(1) @global_val64 to ptr), align 8
// CHECK-NEXT:    ret void
//
__attribute__((device)) void test_global64() {
  global_val64 = __builtin_amdgcn_atomic_inc64(&global_val64, global_val64, __ATOMIC_SEQ_CST, "workgroup");

  global_val64 = __builtin_amdgcn_atomic_dec64(&global_val64, global_val64, __ATOMIC_SEQ_CST, "workgroup");
}

__attribute__((constant)) __UINT32_TYPE__ cval32;
// CHECK-LABEL: @_Z15test_constant32v(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[LOCAL_VAL:%.*]] = alloca i32, align 4, addrspace(5)
// CHECK-NEXT:    [[LOCAL_VAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[LOCAL_VAL]] to ptr
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr addrspacecast (ptr addrspace(4) @cval32 to ptr), align 4
// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(4) @cval32 to ptr), i32 [[TMP0]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP1]], ptr [[LOCAL_VAL_ASCAST]], align 4
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspacecast (ptr addrspace(4) @cval32 to ptr), align 4
// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(4) @cval32 to ptr), i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP3]], ptr [[LOCAL_VAL_ASCAST]], align 4
// CHECK-NEXT:    ret void
//
__attribute__((device)) void test_constant32() {
  __UINT32_TYPE__ local_val;

  local_val = __builtin_amdgcn_atomic_inc32(&cval32, cval32, __ATOMIC_SEQ_CST, "workgroup");

  local_val = __builtin_amdgcn_atomic_dec32(&cval32, cval32, __ATOMIC_SEQ_CST, "workgroup");
}

__attribute__((constant)) __UINT64_TYPE__ cval64;
// CHECK-LABEL: @_Z15test_constant64v(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[LOCAL_VAL:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT:    [[LOCAL_VAL_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[LOCAL_VAL]] to ptr
// CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr addrspacecast (ptr addrspace(4) @cval64 to ptr), align 8
// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(4) @cval64 to ptr), i64 [[TMP0]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT:    store i64 [[TMP1]], ptr [[LOCAL_VAL_ASCAST]], align 8
// CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr addrspacecast (ptr addrspace(4) @cval64 to ptr), align 8
// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(4) @cval64 to ptr), i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT:    store i64 [[TMP3]], ptr [[LOCAL_VAL_ASCAST]], align 8
// CHECK-NEXT:    ret void
//
__attribute__((device)) void test_constant64() {
  __UINT64_TYPE__ local_val;

  local_val = __builtin_amdgcn_atomic_inc64(&cval64, cval64, __ATOMIC_SEQ_CST, "workgroup");

  local_val = __builtin_amdgcn_atomic_dec64(&cval64, cval64, __ATOMIC_SEQ_CST, "workgroup");
}

// CHECK-LABEL: @_Z12test_order32v(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP0]] syncscope("workgroup") monotonic, align 4
// CHECK-NEXT:    store i32 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP2]] syncscope("workgroup") acquire, align 4
// CHECK-NEXT:    store i32 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
// CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
// CHECK-NEXT:    [[TMP5:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP4]] syncscope("workgroup") acquire, align 4
// CHECK-NEXT:    store i32 [[TMP5]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
// CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
// CHECK-NEXT:    [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP6]] syncscope("workgroup") release, align 4
// CHECK-NEXT:    store i32 [[TMP7]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
// CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
// CHECK-NEXT:    [[TMP9:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP8]] syncscope("workgroup") acq_rel, align 4
// CHECK-NEXT:    store i32 [[TMP9]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
// CHECK-NEXT:    [[TMP10:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
// CHECK-NEXT:    [[TMP11:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), i32 [[TMP10]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP11]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order32vE3val to ptr), align 4
// CHECK-NEXT:    ret void
//
__attribute__((device)) void test_order32() {
  __attribute__((shared)) __UINT32_TYPE__ val;

  val = __builtin_amdgcn_atomic_inc32(&val, val, __ATOMIC_RELAXED, "workgroup");

  val = __builtin_amdgcn_atomic_inc32(&val, val, __ATOMIC_CONSUME, "workgroup");

  val = __builtin_amdgcn_atomic_inc32(&val, val, __ATOMIC_ACQUIRE, "workgroup");

  val = __builtin_amdgcn_atomic_dec32(&val, val, __ATOMIC_RELEASE, "workgroup");

  val = __builtin_amdgcn_atomic_dec32(&val, val, __ATOMIC_ACQ_REL, "workgroup");

  val = __builtin_amdgcn_atomic_dec32(&val, val, __ATOMIC_SEQ_CST, "workgroup");
}

// CHECK-LABEL: @_Z12test_order64v(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP0]] syncscope("workgroup") monotonic, align 8
// CHECK-NEXT:    store i64 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
// CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP2]] syncscope("workgroup") acquire, align 8
// CHECK-NEXT:    store i64 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
// CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
// CHECK-NEXT:    [[TMP5:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP4]] syncscope("workgroup") acquire, align 8
// CHECK-NEXT:    store i64 [[TMP5]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
// CHECK-NEXT:    [[TMP6:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
// CHECK-NEXT:    [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP6]] syncscope("workgroup") release, align 8
// CHECK-NEXT:    store i64 [[TMP7]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
// CHECK-NEXT:    [[TMP8:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
// CHECK-NEXT:    [[TMP9:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP8]] syncscope("workgroup") acq_rel, align 8
// CHECK-NEXT:    store i64 [[TMP9]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
// CHECK-NEXT:    [[TMP10:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
// CHECK-NEXT:    [[TMP11:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), i64 [[TMP10]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT:    store i64 [[TMP11]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_order64vE3val to ptr), align 8
// CHECK-NEXT:    ret void
//
__attribute__((device)) void test_order64() {
  __attribute__((shared)) __UINT64_TYPE__ val;

  val = __builtin_amdgcn_atomic_inc64(&val, val, __ATOMIC_RELAXED, "workgroup");

  val = __builtin_amdgcn_atomic_dec64(&val, val, __ATOMIC_CONSUME, "workgroup");

  val = __builtin_amdgcn_atomic_inc64(&val, val, __ATOMIC_ACQUIRE, "workgroup");

  val = __builtin_amdgcn_atomic_dec64(&val, val, __ATOMIC_RELEASE, "workgroup");

  val = __builtin_amdgcn_atomic_dec64(&val, val, __ATOMIC_ACQ_REL, "workgroup");

  val = __builtin_amdgcn_atomic_dec64(&val, val, __ATOMIC_SEQ_CST, "workgroup");
}

// CHECK-LABEL: @_Z12test_scope32v(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP0]] seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
// CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP2]] syncscope("workgroup") seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
// CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
// CHECK-NEXT:    [[TMP5:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP4]] syncscope("agent") seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP5]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
// CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
// CHECK-NEXT:    [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), i32 [[TMP6]] syncscope("wavefront") seq_cst, align 4
// CHECK-NEXT:    store i32 [[TMP7]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope32vE3val to ptr), align 4
// CHECK-NEXT:    ret void
//
__attribute__((device)) void test_scope32() {
  __attribute__((shared)) __UINT32_TYPE__ val;

  val = __builtin_amdgcn_atomic_inc32(&val, val, __ATOMIC_SEQ_CST, "");

  val = __builtin_amdgcn_atomic_dec32(&val, val, __ATOMIC_SEQ_CST, "workgroup");

  val = __builtin_amdgcn_atomic_dec32(&val, val, __ATOMIC_SEQ_CST, "agent");

  val = __builtin_amdgcn_atomic_dec32(&val, val, __ATOMIC_SEQ_CST, "wavefront");
}

// CHECK-LABEL: @_Z12test_scope64v(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw uinc_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP0]] seq_cst, align 8
// CHECK-NEXT:    store i64 [[TMP1]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
// CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
// CHECK-NEXT:    [[TMP3:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP2]] syncscope("workgroup") seq_cst, align 8
// CHECK-NEXT:    store i64 [[TMP3]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
// CHECK-NEXT:    [[TMP4:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
// CHECK-NEXT:    [[TMP5:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP4]] syncscope("agent") seq_cst, align 8
// CHECK-NEXT:    store i64 [[TMP5]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
// CHECK-NEXT:    [[TMP6:%.*]] = load i64, ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
// CHECK-NEXT:    [[TMP7:%.*]] = atomicrmw udec_wrap ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), i64 [[TMP6]] syncscope("wavefront") seq_cst, align 8
// CHECK-NEXT:    store i64 [[TMP7]], ptr addrspacecast (ptr addrspace(3) @_ZZ12test_scope64vE3val to ptr), align 8
// CHECK-NEXT:    ret void
//
__attribute__((device)) void test_scope64() {
  __attribute__((shared)) __UINT64_TYPE__ val;

  val = __builtin_amdgcn_atomic_inc64(&val, val, __ATOMIC_SEQ_CST, "");

  val = __builtin_amdgcn_atomic_dec64(&val, val, __ATOMIC_SEQ_CST, "workgroup");

  val = __builtin_amdgcn_atomic_dec64(&val, val, __ATOMIC_SEQ_CST, "agent");

  val = __builtin_amdgcn_atomic_dec64(&val, val, __ATOMIC_SEQ_CST, "wavefront");
}