//===- Synchronization.cpp - OpenMP Device synchronization API ---- c++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Include all synchronization.
//
//===----------------------------------------------------------------------===//

#include "Synchronization.h"

#include "Debug.h"
#include "Interface.h"
#include "Mapping.h"
#include "State.h"
#include "Types.h"
#include "Utils.h"

#pragma omp begin declare target device_type(nohost)

using namespace ompx;

namespace impl {

/// Atomics
///
///{
/// NOTE: This function needs to be implemented by every target.
uint32_t atomicInc(uint32_t *Address, uint32_t Val, atomic::OrderingTy Ordering,
                   atomic::MemScopeTy MemScope);

template <typename Ty>
Ty atomicAdd(Ty *Address, Ty Val, atomic::OrderingTy Ordering) {
  return __atomic_fetch_add(Address, Val, Ordering);
}

template <typename Ty>
Ty atomicMul(Ty *Address, Ty V, atomic::OrderingTy Ordering) {
  Ty TypedCurrentVal, TypedNewVal;
  bool Success;
  do {
    TypedCurrentVal = atomic::load(Address, Ordering);
    TypedNewVal = TypedCurrentVal * V;
    Success = atomic::cas(Address, TypedCurrentVal, TypedNewVal, Ordering,
                          atomic::relaxed);
  } while (!Success);
  // Return the value observed before the successful update, matching the
  // fetch-and-op convention of the other atomics.
  return TypedCurrentVal;
}

template <typename Ty> Ty atomicLoad(Ty *Address, atomic::OrderingTy Ordering) {
  return atomicAdd(Address, Ty(0), Ordering);
}

template <typename Ty>
void atomicStore(Ty *Address, Ty Val, atomic::OrderingTy Ordering) {
  __atomic_store_n(Address, Val, Ordering);
}

template <typename Ty>
bool atomicCAS(Ty *Address, Ty ExpectedV, Ty DesiredV,
               atomic::OrderingTy OrderingSucc,
               atomic::OrderingTy OrderingFail) {
  return __atomic_compare_exchange(Address, &ExpectedV, &DesiredV, false,
                                   OrderingSucc, OrderingFail);
}

template <typename Ty>
Ty atomicMin(Ty *Address, Ty Val, atomic::OrderingTy Ordering) {
  return __atomic_fetch_min(Address, Val, Ordering);
}

template <typename Ty>
Ty atomicMax(Ty *Address, Ty Val, atomic::OrderingTy Ordering) {
  return __atomic_fetch_max(Address, Val, Ordering);
}
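
// Note on the FP helpers below: for IEEE-754 values the raw bit patterns of
// non-negative numbers order like signed integers, while the ordering of
// negative numbers (sign bit set) is reversed when the patterns are read as
// unsigned integers. Dispatching on the sign of Val therefore lets the integer
// min/max atomics above implement the floating-point operations.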

// TODO: Implement this with __atomic_fetch_max and remove the duplication.
template <typename Ty, typename STy, typename UTy>
Ty atomicMinFP(Ty *Address, Ty Val, atomic::OrderingTy Ordering) {
  if (Val >= 0)
    return atomicMin((STy *)Address, utils::convertViaPun<STy>(Val), Ordering);
  return atomicMax((UTy *)Address, utils::convertViaPun<UTy>(Val), Ordering);
}

template <typename Ty, typename STy, typename UTy>
Ty atomicMaxFP(Ty *Address, Ty Val, atomic::OrderingTy Ordering) {
  if (Val >= 0)
    return atomicMax((STy *)Address, utils::convertViaPun<STy>(Val), Ordering);
  return atomicMin((UTy *)Address, utils::convertViaPun<UTy>(Val), Ordering);
}

template <typename Ty>
Ty atomicOr(Ty *Address, Ty Val, atomic::OrderingTy Ordering) {
  return __atomic_fetch_or(Address, Val, Ordering);
}

template <typename Ty>
Ty atomicAnd(Ty *Address, Ty Val, atomic::OrderingTy Ordering) {
  return __atomic_fetch_and(Address, Val, Ordering);
}

template <typename Ty>
Ty atomicXOr(Ty *Address, Ty Val, atomic::OrderingTy Ordering) {
  return __atomic_fetch_xor(Address, Val, Ordering);
}

uint32_t atomicExchange(uint32_t *Address, uint32_t Val,
                        atomic::OrderingTy Ordering) {
  uint32_t R;
  __atomic_exchange(Address, &Val, &R, Ordering);
  return R;
}
///}

// Forward declarations of the functions that are defined per target in the
// AMDGCN and NVPTX variant regions below.
uint32_t atomicInc(uint32_t *A, uint32_t V, atomic::OrderingTy Ordering,
                   atomic::MemScopeTy MemScope);
void namedBarrierInit();
void namedBarrier();
void fenceTeam(atomic::OrderingTy Ordering);
void fenceKernel(atomic::OrderingTy Ordering);
void fenceSystem(atomic::OrderingTy Ordering);
void syncWarp(__kmpc_impl_lanemask_t);
void syncThreads(atomic::OrderingTy Ordering);
void syncThreadsAligned(atomic::OrderingTy Ordering) { syncThreads(Ordering); }
void unsetLock(omp_lock_t *);
int testLock(omp_lock_t *);
void initLock(omp_lock_t *);
void destroyLock(omp_lock_t *);
void setLock(omp_lock_t *);
void unsetCriticalLock(omp_lock_t *);
void setCriticalLock(omp_lock_t *);

/// AMDGCN Implementation
///
///{
#pragma omp begin declare variant match(device = {arch(amdgcn)})

uint32_t atomicInc(uint32_t *A, uint32_t V, atomic::OrderingTy Ordering,
                   atomic::MemScopeTy MemScope) {
  // builtin_amdgcn_atomic_inc32 should expand to this switch when
  // passed a runtime value, but does not do so yet. Workaround here.

#define ScopeSwitch(ORDER)                                                     \
  switch (MemScope) {                                                          \
  case atomic::MemScopeTy::all:                                                \
    return __builtin_amdgcn_atomic_inc32(A, V, ORDER, "");                     \
  case atomic::MemScopeTy::device:                                             \
    return __builtin_amdgcn_atomic_inc32(A, V, ORDER, "agent");                \
  case atomic::MemScopeTy::cgroup:                                             \
    return __builtin_amdgcn_atomic_inc32(A, V, ORDER, "workgroup");            \
  }

#define Case(ORDER)                                                            \
  case ORDER:                                                                  \
    ScopeSwitch(ORDER)

  switch (Ordering) {
  default:
    __builtin_unreachable();
    Case(atomic::relaxed);
    Case(atomic::aquire);
    Case(atomic::release);
    Case(atomic::acq_rel);
    Case(atomic::seq_cst);
#undef Case
#undef ScopeSwitch
  }
}

uint32_t SHARED(namedBarrierTracker);

void namedBarrierInit() {
  // Don't have global ctors, and shared memory is not zero init.
  atomic::store(&namedBarrierTracker, 0u, atomic::release);
}

void namedBarrier() {
  uint32_t NumThreads = omp_get_num_threads();
  // assert(NumThreads % 32 == 0);

  uint32_t WarpSize = mapping::getWarpSize();
  uint32_t NumWaves = NumThreads / WarpSize;

  fence::team(atomic::aquire);

  // Named barrier implementation for AMDGCN. It uses two 16-bit unsigned
  // counters packed into a single atomically accessed 32-bit integer: the low
  // bits count the waves that have reached the barrier (assumed zero before
  // this call), the high bits count how many times the barrier has been
  // passed.

  // precondition: NumWaves != 0;
  // invariant: NumWaves * WarpSize == NumThreads;
  // precondition: NumWaves < 0xffffu;

  // Increment the low 16 bits once, using the lowest active thread.
  if (mapping::isLeaderInWarp()) {
    uint32_t load = atomic::add(&namedBarrierTracker, 1,
                                atomic::relaxed); // commutative

    // Record the number of times the barrier has been passed.
    uint32_t generation = load & 0xffff0000u;

    if ((load & 0x0000ffffu) == (NumWaves - 1)) {
      // Reached NumWaves in the low bits, so this is the last wave. Set the
      // low bits to zero and increment the high bits.
      load += 0x00010000u; // wrap is safe
      load &= 0xffff0000u; // because bits zeroed second

      // Reset the wave counter and release the waiting waves.
      atomic::store(&namedBarrierTracker, load, atomic::relaxed);
    } else {
      // More waves still to go, spin until the generation counter changes.
      do {
        __builtin_amdgcn_s_sleep(0);
        load = atomic::load(&namedBarrierTracker, atomic::relaxed);
      } while ((load & 0xffff0000u) == generation);
    }
  }
  fence::team(atomic::release);
}

// Sema checking of amdgcn_fence is aggressive. The intention is to patch clang
// so that it is usable within a template environment and so that a runtime
// value of the memory order is expanded to this switch within clang/llvm.
void fenceTeam(atomic::OrderingTy Ordering) {
  switch (Ordering) {
  default:
    __builtin_unreachable();
  case atomic::aquire:
    return __builtin_amdgcn_fence(atomic::aquire, "workgroup");
  case atomic::release:
    return __builtin_amdgcn_fence(atomic::release, "workgroup");
  case atomic::acq_rel:
    return __builtin_amdgcn_fence(atomic::acq_rel, "workgroup");
  case atomic::seq_cst:
    return __builtin_amdgcn_fence(atomic::seq_cst, "workgroup");
  }
}

void fenceKernel(atomic::OrderingTy Ordering) {
  switch (Ordering) {
  default:
    __builtin_unreachable();
  case atomic::aquire:
    return __builtin_amdgcn_fence(atomic::aquire, "agent");
  case atomic::release:
    return __builtin_amdgcn_fence(atomic::release, "agent");
  case atomic::acq_rel:
    return __builtin_amdgcn_fence(atomic::acq_rel, "agent");
  case atomic::seq_cst:
    return __builtin_amdgcn_fence(atomic::seq_cst, "agent");
  }
}

void fenceSystem(atomic::OrderingTy Ordering) {
  switch (Ordering) {
  default:
    __builtin_unreachable();
  case atomic::aquire:
    return __builtin_amdgcn_fence(atomic::aquire, "");
  case atomic::release:
    return __builtin_amdgcn_fence(atomic::release, "");
  case atomic::acq_rel:
    return __builtin_amdgcn_fence(atomic::acq_rel, "");
  case atomic::seq_cst:
    return __builtin_amdgcn_fence(atomic::seq_cst, "");
  }
}

void syncWarp(__kmpc_impl_lanemask_t) {
  // This is a no-op on current AMDGPU hardware but it is used by the optimizer
  // to enforce convergent behaviour between control flow graphs.
  __builtin_amdgcn_wave_barrier();
}

void syncThreads(atomic::OrderingTy Ordering) {
  if (Ordering != atomic::relaxed)
    fenceTeam(Ordering == atomic::acq_rel ? atomic::release : atomic::seq_cst);

  __builtin_amdgcn_s_barrier();

  if (Ordering != atomic::relaxed)
    fenceTeam(Ordering == atomic::acq_rel ? atomic::aquire : atomic::seq_cst);
}
void syncThreadsAligned(atomic::OrderingTy Ordering) { syncThreads(Ordering); }

// TODO: Don't have wavefront lane locks. Possibly can't have them.
void unsetLock(omp_lock_t *) { __builtin_trap(); }
int testLock(omp_lock_t *) { __builtin_trap(); }
void initLock(omp_lock_t *) { __builtin_trap(); }
void destroyLock(omp_lock_t *) { __builtin_trap(); }
void setLock(omp_lock_t *) { __builtin_trap(); }

constexpr uint32_t UNSET = 0;
constexpr uint32_t SET = 1;

void unsetCriticalLock(omp_lock_t *Lock) {
  (void)atomicExchange((uint32_t *)Lock, UNSET, atomic::acq_rel);
}
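
// Only the lowest active lane of the wavefront attempts to acquire the lock,
// so that diverged lanes of the same wavefront do not all spin on (and
// potentially deadlock over) the same lock.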
void setCriticalLock(omp_lock_t *Lock) {
  uint64_t LowestActiveThread = utils::ffs(mapping::activemask()) - 1;
  if (mapping::getThreadIdInWarp() == LowestActiveThread) {
    fenceKernel(atomic::release);
    while (!atomicCAS((uint32_t *)Lock, UNSET, SET, atomic::relaxed,
                      atomic::relaxed)) {
      __builtin_amdgcn_s_sleep(32);
    }
    fenceKernel(atomic::aquire);
  }
}

#pragma omp end declare variant
///}

/// NVPTX Implementation
///
///{
#pragma omp begin declare variant match(                                       \
        device = {arch(nvptx, nvptx64)},                                       \
        implementation = {extension(match_any)})

uint32_t atomicInc(uint32_t *Address, uint32_t Val, atomic::OrderingTy Ordering,
                   atomic::MemScopeTy MemScope) {
  return __nvvm_atom_inc_gen_ui(Address, Val);
}

void namedBarrierInit() {}

void namedBarrier() {
  uint32_t NumThreads = omp_get_num_threads();
  ASSERT(NumThreads % 32 == 0, nullptr);

  // The named barrier for active parallel threads of a team in an L1 parallel
  // region to synchronize with each other.
  constexpr int BarrierNo = 7;
  asm volatile("barrier.sync %0, %1;"
               :
               : "r"(BarrierNo), "r"(NumThreads)
               : "memory");
}

void fenceTeam(atomic::OrderingTy) { __nvvm_membar_cta(); }

void fenceKernel(atomic::OrderingTy) { __nvvm_membar_gl(); }

void fenceSystem(atomic::OrderingTy) { __nvvm_membar_sys(); }

void syncWarp(__kmpc_impl_lanemask_t Mask) { __nvvm_bar_warp_sync(Mask); }

void syncThreads(atomic::OrderingTy Ordering) {
  constexpr int BarrierNo = 8;
  asm volatile("barrier.sync %0;" : : "r"(BarrierNo) : "memory");
}

void syncThreadsAligned(atomic::OrderingTy Ordering) { __syncthreads(); }

constexpr uint32_t OMP_SPIN = 1000;
constexpr uint32_t UNSET = 0;
constexpr uint32_t SET = 1;

// TODO: This seems to hide a bug in the declare variant handling. If it is
//       called before it is defined here, the overload won't happen.
//       Investigate later!
void unsetLock(omp_lock_t *Lock) {
  (void)atomicExchange((uint32_t *)Lock, UNSET, atomic::seq_cst);
}

int testLock(omp_lock_t *Lock) {
  return atomicAdd((uint32_t *)Lock, 0u, atomic::seq_cst);
}

void initLock(omp_lock_t *Lock) { unsetLock(Lock); }

void destroyLock(omp_lock_t *Lock) { unsetLock(Lock); }

void setLock(omp_lock_t *Lock) {
  // TODO: not sure spinning is a good idea here..
  while (atomicCAS((uint32_t *)Lock, UNSET, SET, atomic::seq_cst,
                   atomic::seq_cst) != UNSET) {
    int32_t start = __nvvm_read_ptx_sreg_clock();
    int32_t now;
    for (;;) {
      now = __nvvm_read_ptx_sreg_clock();
      int32_t cycles = now > start ? now - start : now + (0xffffffff - start);
      if (cycles >= OMP_SPIN * mapping::getBlockIdInKernel()) {
        break;
      }
    }
  } // wait for 0 to be the read value
}

#pragma omp end declare variant
///}

} // namespace impl

void synchronize::init(bool IsSPMD) {
  if (!IsSPMD)
    impl::namedBarrierInit();
}

void synchronize::warp(LaneMaskTy Mask) { impl::syncWarp(Mask); }

void synchronize::threads(atomic::OrderingTy Ordering) {
  impl::syncThreads(Ordering);
}

void synchronize::threadsAligned(atomic::OrderingTy Ordering) {
  impl::syncThreadsAligned(Ordering);
}

void fence::team(atomic::OrderingTy Ordering) { impl::fenceTeam(Ordering); }

void fence::kernel(atomic::OrderingTy Ordering) { impl::fenceKernel(Ordering); }

void fence::system(atomic::OrderingTy Ordering) { impl::fenceSystem(Ordering); }

#define ATOMIC_COMMON_OP(TY)                                                   \
  TY atomic::add(TY *Addr, TY V, atomic::OrderingTy Ordering) {                \
    return impl::atomicAdd(Addr, V, Ordering);                                 \
  }                                                                            \
  TY atomic::mul(TY *Addr, TY V, atomic::OrderingTy Ordering) {                \
    return impl::atomicMul(Addr, V, Ordering);                                 \
  }                                                                            \
  TY atomic::load(TY *Addr, atomic::OrderingTy Ordering) {                     \
    return impl::atomicLoad(Addr, Ordering);                                   \
  }                                                                            \
  bool atomic::cas(TY *Addr, TY ExpectedV, TY DesiredV,                        \
                   atomic::OrderingTy OrderingSucc,                            \
                   atomic::OrderingTy OrderingFail) {                          \
    return impl::atomicCAS(Addr, ExpectedV, DesiredV, OrderingSucc,            \
                           OrderingFail);                                      \
  }

#define ATOMIC_FP_ONLY_OP(TY, STY, UTY)                                        \
  TY atomic::min(TY *Addr, TY V, atomic::OrderingTy Ordering) {                \
    return impl::atomicMinFP<TY, STY, UTY>(Addr, V, Ordering);                 \
  }                                                                            \
  TY atomic::max(TY *Addr, TY V, atomic::OrderingTy Ordering) {                \
    return impl::atomicMaxFP<TY, STY, UTY>(Addr, V, Ordering);                 \
  }                                                                            \
  void atomic::store(TY *Addr, TY V, atomic::OrderingTy Ordering) {            \
    impl::atomicStore(reinterpret_cast<UTY *>(Addr),                           \
                      utils::convertViaPun<UTY>(V), Ordering);                 \
  }

#define ATOMIC_INT_ONLY_OP(TY)                                                 \
  TY atomic::min(TY *Addr, TY V, atomic::OrderingTy Ordering) {                \
    return impl::atomicMin<TY>(Addr, V, Ordering);                             \
  }                                                                            \
  TY atomic::max(TY *Addr, TY V, atomic::OrderingTy Ordering) {                \
    return impl::atomicMax<TY>(Addr, V, Ordering);                             \
  }                                                                            \
  TY atomic::bit_or(TY *Addr, TY V, atomic::OrderingTy Ordering) {             \
    return impl::atomicOr(Addr, V, Ordering);                                  \
  }                                                                            \
  TY atomic::bit_and(TY *Addr, TY V, atomic::OrderingTy Ordering) {            \
    return impl::atomicAnd(Addr, V, Ordering);                                 \
  }                                                                            \
  TY atomic::bit_xor(TY *Addr, TY V, atomic::OrderingTy Ordering) {            \
    return impl::atomicXOr(Addr, V, Ordering);                                 \
  }                                                                            \
  void atomic::store(TY *Addr, TY V, atomic::OrderingTy Ordering) {            \
    impl::atomicStore(Addr, V, Ordering);                                      \
  }

#define ATOMIC_FP_OP(TY, STY, UTY)                                             \
  ATOMIC_FP_ONLY_OP(TY, STY, UTY)                                              \
  ATOMIC_COMMON_OP(TY)

#define ATOMIC_INT_OP(TY)                                                      \
  ATOMIC_INT_ONLY_OP(TY)                                                       \
  ATOMIC_COMMON_OP(TY)

// This needs to be kept in sync with the header; it is also the reason we
// don't use templates here.
ATOMIC_INT_OP(int8_t)
ATOMIC_INT_OP(int16_t)
ATOMIC_INT_OP(int32_t)
ATOMIC_INT_OP(int64_t)
ATOMIC_INT_OP(uint8_t)
ATOMIC_INT_OP(uint16_t)
ATOMIC_INT_OP(uint32_t)
ATOMIC_INT_OP(uint64_t)
ATOMIC_FP_OP(float, int32_t, uint32_t)
ATOMIC_FP_OP(double, int64_t, uint64_t)
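
// Illustrative usage of the entry points stamped out above (Counter and Bound
// are hypothetical device-visible variables, not part of this file):
//   uint32_t OldCount = atomic::add(&Counter, 1u, atomic::relaxed);
//   float OldBound = atomic::min(&Bound, 1.0f, atomic::seq_cst);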

#undef ATOMIC_INT_ONLY_OP
#undef ATOMIC_FP_ONLY_OP
#undef ATOMIC_COMMON_OP
#undef ATOMIC_INT_OP
#undef ATOMIC_FP_OP

uint32_t atomic::inc(uint32_t *Addr, uint32_t V, atomic::OrderingTy Ordering,
                     atomic::MemScopeTy MemScope) {
  return impl::atomicInc(Addr, V, Ordering, MemScope);
}

void unsetCriticalLock(omp_lock_t *Lock) { impl::unsetLock(Lock); }

void setCriticalLock(omp_lock_t *Lock) { impl::setLock(Lock); }
508 extern "C" {
509 void __kmpc_ordered(IdentTy *Loc, int32_t TId) {}
511 void __kmpc_end_ordered(IdentTy *Loc, int32_t TId) {}
513 int32_t __kmpc_cancel_barrier(IdentTy *Loc, int32_t TId) {
514 __kmpc_barrier(Loc, TId);
515 return 0;
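
// When the main thread in generic mode reaches a barrier it is effectively in
// a team of one, so a memory flush suffices; SPMD mode uses the aligned
// hardware barrier; generic-mode worker threads use the named barrier.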
void __kmpc_barrier(IdentTy *Loc, int32_t TId) {
  if (mapping::isMainThreadInGenericMode())
    return __kmpc_flush(Loc);

  if (mapping::isSPMDMode())
    return __kmpc_barrier_simple_spmd(Loc, TId);

  impl::namedBarrier();
}

[[clang::noinline]] void __kmpc_barrier_simple_spmd(IdentTy *Loc, int32_t TId) {
  synchronize::threadsAligned(atomic::OrderingTy::seq_cst);
}

[[clang::noinline]] void __kmpc_barrier_simple_generic(IdentTy *Loc,
                                                       int32_t TId) {
  synchronize::threads(atomic::OrderingTy::seq_cst);
}

int32_t __kmpc_master(IdentTy *Loc, int32_t TId) {
  return omp_get_thread_num() == 0;
}

void __kmpc_end_master(IdentTy *Loc, int32_t TId) {}

int32_t __kmpc_masked(IdentTy *Loc, int32_t TId, int32_t Filter) {
  return omp_get_thread_num() == Filter;
}

void __kmpc_end_masked(IdentTy *Loc, int32_t TId) {}

int32_t __kmpc_single(IdentTy *Loc, int32_t TId) {
  return __kmpc_master(Loc, TId);
}

void __kmpc_end_single(IdentTy *Loc, int32_t TId) {
  // The barrier is explicitly called.
}

void __kmpc_flush(IdentTy *Loc) { fence::kernel(atomic::seq_cst); }

uint64_t __kmpc_warp_active_thread_mask(void) { return mapping::activemask(); }

void __kmpc_syncwarp(uint64_t Mask) { synchronize::warp(Mask); }

void __kmpc_critical(IdentTy *Loc, int32_t TId, CriticalNameTy *Name) {
  impl::setCriticalLock(reinterpret_cast<omp_lock_t *>(Name));
}

void __kmpc_end_critical(IdentTy *Loc, int32_t TId, CriticalNameTy *Name) {
  impl::unsetCriticalLock(reinterpret_cast<omp_lock_t *>(Name));
}

void omp_init_lock(omp_lock_t *Lock) { impl::initLock(Lock); }

void omp_destroy_lock(omp_lock_t *Lock) { impl::destroyLock(Lock); }

void omp_set_lock(omp_lock_t *Lock) { impl::setLock(Lock); }

void omp_unset_lock(omp_lock_t *Lock) { impl::unsetLock(Lock); }

int omp_test_lock(omp_lock_t *Lock) { return impl::testLock(Lock); }

void ompx_sync_block(int Ordering) {
  impl::syncThreadsAligned(atomic::OrderingTy(Ordering));
}
void ompx_sync_block_acq_rel() {
  impl::syncThreadsAligned(atomic::OrderingTy::acq_rel);
}
void ompx_sync_block_divergent(int Ordering) {
  impl::syncThreads(atomic::OrderingTy(Ordering));
}
} // extern "C"

#pragma omp end declare target