//===- Synchronization.cpp - OpenMP Device synchronization API ---- c++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Include all synchronization.
//
//===----------------------------------------------------------------------===//

#include "Synchronization.h"

#include "Debug.h"
#include "Interface.h"
#include "Mapping.h"
#include "State.h"
#include "Types.h"
#include "Utils.h"

#pragma omp declare target

using namespace _OMP;

namespace impl {

/// Atomics
///
///{
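// These helpers are thin wrappers over the clang __atomic builtins. atomicInc
// has no generic builtin, so it is only declared here and implemented in the
// target-specific variant blocks below.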
/// NOTE: This function needs to be implemented by every target.
uint32_t atomicInc(uint32_t *Address, uint32_t Val, int Ordering);

uint32_t atomicLoad(uint32_t *Address, int Ordering) {
  return __atomic_fetch_add(Address, 0U, __ATOMIC_SEQ_CST);
}

void atomicStore(uint32_t *Address, uint32_t Val, int Ordering) {
  __atomic_store_n(Address, Val, Ordering);
}

uint32_t atomicAdd(uint32_t *Address, uint32_t Val, int Ordering) {
  return __atomic_fetch_add(Address, Val, Ordering);
}
uint32_t atomicMax(uint32_t *Address, uint32_t Val, int Ordering) {
  return __atomic_fetch_max(Address, Val, Ordering);
}
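// Note that __atomic_compare_exchange writes the value it observed back into
// Compare, so atomicCAS below returns the previous contents of Address whether
// or not the exchange succeeded. atomicExchange likewise returns the old value.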
uint32_t atomicExchange(uint32_t *Address, uint32_t Val, int Ordering) {
  uint32_t R;
  __atomic_exchange(Address, &Val, &R, Ordering);
  return R;
}
uint32_t atomicCAS(uint32_t *Address, uint32_t Compare, uint32_t Val,
                   int Ordering) {
  (void)__atomic_compare_exchange(Address, &Compare, &Val, false, Ordering,
                                  Ordering);
  return Compare;
}
uint64_t atomicAdd(uint64_t *Address, uint64_t Val, int Ordering) {
  return __atomic_fetch_add(Address, Val, Ordering);
}
///}

/// AMDGCN Implementation
///
///{
#pragma omp begin declare variant match(device = {arch(amdgcn)})
uint32_t atomicInc(uint32_t *A, uint32_t V, int Ordering) {
  // __builtin_amdgcn_atomic_inc32 should expand to this switch when
  // passed a runtime value, but does not do so yet. Workaround here.
  switch (Ordering) {
  default:
    __builtin_unreachable();
  case __ATOMIC_RELAXED:
    return __builtin_amdgcn_atomic_inc32(A, V, __ATOMIC_RELAXED, "");
  case __ATOMIC_ACQUIRE:
    return __builtin_amdgcn_atomic_inc32(A, V, __ATOMIC_ACQUIRE, "");
  case __ATOMIC_RELEASE:
    return __builtin_amdgcn_atomic_inc32(A, V, __ATOMIC_RELEASE, "");
  case __ATOMIC_ACQ_REL:
    return __builtin_amdgcn_atomic_inc32(A, V, __ATOMIC_ACQ_REL, "");
  case __ATOMIC_SEQ_CST:
    return __builtin_amdgcn_atomic_inc32(A, V, __ATOMIC_SEQ_CST, "");
  }
}
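// Tracker for the AMDGCN named barrier below; SHARED places it in LDS (shared)
// memory, so there is one instance per team.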
uint32_t SHARED(namedBarrierTracker);

void namedBarrierInit() {
  // We don't have global ctors, and shared memory is not zero initialized, so
  // clear the tracker explicitly.
  atomic::store(&namedBarrierTracker, 0u, __ATOMIC_RELEASE);
}
void namedBarrier() {
  uint32_t NumThreads = omp_get_num_threads();
  // assert(NumThreads % 32 == 0);

  uint32_t WarpSize = mapping::getWarpSize();
  uint32_t NumWaves = NumThreads / WarpSize;

  fence::team(__ATOMIC_ACQUIRE);

  // Named barrier implementation for amdgcn.
  // Uses two 16 bit unsigned counters. One for the number of waves to have
  // reached the barrier, and one to count how many times the barrier has been
  // passed. These are packed in a single atomically accessed 32 bit integer.
  // Low bits for the number of waves, assumed zero before this call.
  // High bits to count the number of times the barrier has been passed.

  // precondition: NumWaves != 0;
  // invariant: NumWaves * WarpSize == NumThreads;
  // precondition: NumWaves < 0xffffu;

  // Increment the low 16 bits once, using the lowest active thread.
  if (mapping::isLeaderInWarp()) {
    uint32_t load = atomic::add(&namedBarrierTracker, 1,
                                __ATOMIC_RELAXED); // commutative

    // Record the number of times the barrier has been passed.
    uint32_t generation = load & 0xffff0000u;

    if ((load & 0x0000ffffu) == (NumWaves - 1)) {
      // Reached NumWaves in the low bits, so this is the last wave.
      // Set the low bits to zero and increment the high bits.
      load += 0x00010000u; // wrap is safe
      load &= 0xffff0000u; // because bits zeroed second

      // Reset the wave counter and release the waiting waves.
      atomic::store(&namedBarrierTracker, load, __ATOMIC_RELAXED);
    } else {
      // More waves still to go, spin until the generation counter changes.
      do {
        __builtin_amdgcn_s_sleep(0);
        load = atomic::load(&namedBarrierTracker, __ATOMIC_RELAXED);
      } while ((load & 0xffff0000u) == generation);
    }
  }
  fence::team(__ATOMIC_RELEASE);
}
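// The second argument to __builtin_amdgcn_fence is the synchronization scope:
// "workgroup" limits the fence to the team, "agent" to the device, and the
// empty string to the whole system.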
// Sema checking of amdgcn_fence is aggressive. The intention is to patch clang
// so that it is usable within a template environment and so that a runtime
// value of the memory order is expanded to this switch within clang/llvm.
void fenceTeam(int Ordering) {
  switch (Ordering) {
  default:
    __builtin_unreachable();
  case __ATOMIC_ACQUIRE:
    return __builtin_amdgcn_fence(__ATOMIC_ACQUIRE, "workgroup");
  case __ATOMIC_RELEASE:
    return __builtin_amdgcn_fence(__ATOMIC_RELEASE, "workgroup");
  case __ATOMIC_ACQ_REL:
    return __builtin_amdgcn_fence(__ATOMIC_ACQ_REL, "workgroup");
  case __ATOMIC_SEQ_CST:
    return __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup");
  }
}

void fenceKernel(int Ordering) {
  switch (Ordering) {
  default:
    __builtin_unreachable();
  case __ATOMIC_ACQUIRE:
    return __builtin_amdgcn_fence(__ATOMIC_ACQUIRE, "agent");
  case __ATOMIC_RELEASE:
    return __builtin_amdgcn_fence(__ATOMIC_RELEASE, "agent");
  case __ATOMIC_ACQ_REL:
    return __builtin_amdgcn_fence(__ATOMIC_ACQ_REL, "agent");
  case __ATOMIC_SEQ_CST:
    return __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "agent");
  }
}

void fenceSystem(int Ordering) {
  switch (Ordering) {
  default:
    __builtin_unreachable();
  case __ATOMIC_ACQUIRE:
    return __builtin_amdgcn_fence(__ATOMIC_ACQUIRE, "");
  case __ATOMIC_RELEASE:
    return __builtin_amdgcn_fence(__ATOMIC_RELEASE, "");
  case __ATOMIC_ACQ_REL:
    return __builtin_amdgcn_fence(__ATOMIC_ACQ_REL, "");
  case __ATOMIC_SEQ_CST:
    return __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "");
  }
}
void syncWarp(__kmpc_impl_lanemask_t) {
  // AMDGCN doesn't need to sync threads in a warp.
}

void syncThreads() { __builtin_amdgcn_s_barrier(); }
void syncThreadsAligned() { syncThreads(); }
// TODO: Don't have wavefront lane locks. Possibly can't have them.
void unsetLock(omp_lock_t *) { __builtin_trap(); }
int testLock(omp_lock_t *) { __builtin_trap(); }
void initLock(omp_lock_t *) { __builtin_trap(); }
void destroyLock(omp_lock_t *) { __builtin_trap(); }
void setLock(omp_lock_t *) { __builtin_trap(); }

#pragma omp end declare variant
///}
/// NVPTX Implementation
///
///{
#pragma omp begin declare variant match(                                      \
    device = {arch(nvptx, nvptx64)}, implementation = {extension(match_any)})

uint32_t atomicInc(uint32_t *Address, uint32_t Val, int Ordering) {
  return __nvvm_atom_inc_gen_ui(Address, Val);
}
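// Note that __nvvm_atom_inc_gen_ui takes no memory order, so the Ordering
// argument to atomicInc above is ignored.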
void namedBarrierInit() {}

void namedBarrier() {
  uint32_t NumThreads = omp_get_num_threads();
  ASSERT(NumThreads % 32 == 0);

  // The named barrier for active parallel threads of a team in an L1 parallel
  // region to synchronize with each other.
  constexpr int BarrierNo = 7;
  asm volatile("barrier.sync %0, %1;"
               :
               : "r"(BarrierNo), "r"(NumThreads)
               : "memory");
}
void fenceTeam(int) { __nvvm_membar_cta(); }

void fenceKernel(int) { __nvvm_membar_gl(); }

void fenceSystem(int) { __nvvm_membar_sys(); }

void syncWarp(__kmpc_impl_lanemask_t Mask) { __nvvm_bar_warp_sync(Mask); }
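// Full barrier for all threads of the CTA, using PTX named barrier 8 (distinct
// from barrier 0, which __syncthreads() maps to).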
void syncThreads() {
  constexpr int BarrierNo = 8;
  asm volatile("barrier.sync %0;" : : "r"(BarrierNo) : "memory");
}

void syncThreadsAligned() { __syncthreads(); }
constexpr uint32_t OMP_SPIN = 1000;
constexpr uint32_t UNSET = 0;
constexpr uint32_t SET = 1;

// TODO: This seems to hide a bug in the declare variant handling. If it is
//       called before it is defined here, the overload won't happen.
//       Investigate later!
void unsetLock(omp_lock_t *Lock) {
  (void)atomicExchange((uint32_t *)Lock, UNSET, __ATOMIC_SEQ_CST);
}

int testLock(omp_lock_t *Lock) {
  return atomicAdd((uint32_t *)Lock, 0u, __ATOMIC_SEQ_CST);
}

void initLock(omp_lock_t *Lock) { unsetLock(Lock); }

void destroyLock(omp_lock_t *Lock) { unsetLock(Lock); }
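// Acquire the lock with a CAS spin loop. Between attempts, busy-wait on the
// clock for a duration proportional to the block id to stagger retries from
// different blocks.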
void setLock(omp_lock_t *Lock) {
  // TODO: not sure spinning is a good idea here..
  while (atomicCAS((uint32_t *)Lock, UNSET, SET, __ATOMIC_SEQ_CST) != UNSET) {
    int32_t start = __nvvm_read_ptx_sreg_clock();
    int32_t now;
    for (;;) {
      now = __nvvm_read_ptx_sreg_clock();
      int32_t cycles = now > start ? now - start : now + (0xffffffff - start);
      if (cycles >= OMP_SPIN * mapping::getBlockId()) {
        break;
      }
    }
  } // wait for 0 to be the read value
}
#pragma omp end declare variant
///}

} // namespace impl
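// Forward the target-agnostic synchronization API onto the implementation
// selected by the declare variant blocks above.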
void synchronize::init(bool IsSPMD) {
  if (!IsSPMD)
    impl::namedBarrierInit();
}
void synchronize::warp(LaneMaskTy Mask) { impl::syncWarp(Mask); }

void synchronize::threads() { impl::syncThreads(); }

void synchronize::threadsAligned() { impl::syncThreadsAligned(); }

void fence::team(int Ordering) { impl::fenceTeam(Ordering); }

void fence::kernel(int Ordering) { impl::fenceKernel(Ordering); }

void fence::system(int Ordering) { impl::fenceSystem(Ordering); }
uint32_t atomic::load(uint32_t *Addr, int Ordering) {
  return impl::atomicLoad(Addr, Ordering);
}

void atomic::store(uint32_t *Addr, uint32_t V, int Ordering) {
  impl::atomicStore(Addr, V, Ordering);
}

uint32_t atomic::inc(uint32_t *Addr, uint32_t V, int Ordering) {
  return impl::atomicInc(Addr, V, Ordering);
}

uint32_t atomic::add(uint32_t *Addr, uint32_t V, int Ordering) {
  return impl::atomicAdd(Addr, V, Ordering);
}

uint64_t atomic::add(uint64_t *Addr, uint64_t V, int Ordering) {
  return impl::atomicAdd(Addr, V, Ordering);
}
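// The OpenMP runtime entry points (__kmpc_* and the omp_* lock API) follow;
// they are exposed with C linkage for the compiler-generated calls.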
extern "C" {
void __kmpc_ordered(IdentTy *Loc, int32_t TId) { FunctionTracingRAII(); }

void __kmpc_end_ordered(IdentTy *Loc, int32_t TId) { FunctionTracingRAII(); }

int32_t __kmpc_cancel_barrier(IdentTy *Loc, int32_t TId) {
  FunctionTracingRAII();
  __kmpc_barrier(Loc, TId);
  return 0;
}
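// For the main thread in generic mode a memory flush is sufficient since no
// other thread of its team is executing user code at this point. SPMD mode
// uses the aligned barrier; generic-mode workers use the named barrier.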
void __kmpc_barrier(IdentTy *Loc, int32_t TId) {
  FunctionTracingRAII();
  if (mapping::isMainThreadInGenericMode())
    return __kmpc_flush(Loc);

  if (mapping::isSPMDMode())
    return __kmpc_barrier_simple_spmd(Loc, TId);

  impl::namedBarrier();
}
__attribute__((noinline)) void __kmpc_barrier_simple_spmd(IdentTy *Loc,
                                                          int32_t TId) {
  FunctionTracingRAII();
  synchronize::threadsAligned();
}

__attribute__((noinline)) void __kmpc_barrier_simple_generic(IdentTy *Loc,
                                                             int32_t TId) {
  FunctionTracingRAII();
  synchronize::threads();
}
int32_t __kmpc_master(IdentTy *Loc, int32_t TId) {
  FunctionTracingRAII();
  return omp_get_thread_num() == 0;
}

void __kmpc_end_master(IdentTy *Loc, int32_t TId) { FunctionTracingRAII(); }

int32_t __kmpc_single(IdentTy *Loc, int32_t TId) {
  FunctionTracingRAII();
  return __kmpc_master(Loc, TId);
}
void __kmpc_end_single(IdentTy *Loc, int32_t TId) {
  FunctionTracingRAII();
  // The barrier is explicitly called.
}

void __kmpc_flush(IdentTy *Loc) {
  FunctionTracingRAII();
  fence::kernel(__ATOMIC_SEQ_CST);
}

uint64_t __kmpc_warp_active_thread_mask(void) {
  FunctionTracingRAII();
  return mapping::activemask();
}

void __kmpc_syncwarp(uint64_t Mask) {
  FunctionTracingRAII();
  synchronize::warp(Mask);
}
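// Critical regions reuse the storage behind the critical section's name as an
// omp_lock_t.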
void __kmpc_critical(IdentTy *Loc, int32_t TId, CriticalNameTy *Name) {
  FunctionTracingRAII();
  omp_set_lock(reinterpret_cast<omp_lock_t *>(Name));
}

void __kmpc_end_critical(IdentTy *Loc, int32_t TId, CriticalNameTy *Name) {
  FunctionTracingRAII();
  omp_unset_lock(reinterpret_cast<omp_lock_t *>(Name));
}
void omp_init_lock(omp_lock_t *Lock) { impl::initLock(Lock); }

void omp_destroy_lock(omp_lock_t *Lock) { impl::destroyLock(Lock); }

void omp_set_lock(omp_lock_t *Lock) { impl::setLock(Lock); }

void omp_unset_lock(omp_lock_t *Lock) { impl::unsetLock(Lock); }

int omp_test_lock(omp_lock_t *Lock) { return impl::testLock(Lock); }

} // extern "C"

#pragma omp end declare target