//===---------- target_impl.cu - NVPTX OpenMP GPU options ------- CUDA -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Definitions of target specific functions
//
//===----------------------------------------------------------------------===//
#pragma omp declare target

#include "common/debug.h"
#include "target_impl.h"
#include "target_interface.h"
/// Split the 64-bit \p val into its low (\p lo) and high (\p hi) 32-bit
/// halves via PTX mov.b64. (Restored closing brace lost in truncation.)
EXTERN void __kmpc_impl_unpack(uint64_t val, uint32_t &lo, uint32_t &hi) {
  asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(val));
}
/// Combine \p lo and \p hi into one 64-bit value (inverse of
/// __kmpc_impl_unpack). Restored the missing local declaration, return
/// statement, and closing brace lost in truncation.
EXTERN uint64_t __kmpc_impl_pack(uint32_t lo, uint32_t hi) {
  uint64_t val;
  asm volatile("mov.b64 %0, {%1,%2};" : "=l"(val) : "r"(lo), "r"(hi));
  return val;
}
/// Mask of all lanes in the warp with an id strictly lower than the calling
/// lane (PTX special register %lanemask_lt). Restored missing return + brace.
EXTERN __kmpc_impl_lanemask_t __kmpc_impl_lanemask_lt() {
  __kmpc_impl_lanemask_t res;
  asm("mov.u32 %0, %%lanemask_lt;" : "=r"(res));
  return res;
}
/// Mask of all lanes in the warp with an id strictly greater than the calling
/// lane (PTX special register %lanemask_gt). Restored missing return + brace.
EXTERN __kmpc_impl_lanemask_t __kmpc_impl_lanemask_gt() {
  __kmpc_impl_lanemask_t res;
  asm("mov.u32 %0, %%lanemask_gt;" : "=r"(res));
  return res;
}
/// Id of the streaming multiprocessor (SM) the calling thread executes on
/// (PTX special register %smid). Restored the missing local declaration,
/// return statement, and closing brace lost in truncation.
EXTERN uint32_t __kmpc_impl_smid() {
  uint32_t id;
  asm("mov.u32 %0, %%smid;" : "=r"(id));
  return id;
}
/// Resolution of the wall-clock timer in seconds; %globaltimer (read by
/// __kmpc_impl_get_wtime below) counts nanoseconds. Restored missing brace.
EXTERN double __kmpc_impl_get_wtick() {
  // Timer precision is 1ns
  return ((double)1E-9);
}
/// Wall-clock time in seconds: nanoseconds from the PTX %globaltimer special
/// register scaled by the tick size. Restored missing closing brace.
EXTERN double __kmpc_impl_get_wtime() {
  unsigned long long nsecs;
  asm("mov.u64 %0, %%globaltimer;" : "=l"(nsecs));
  return (double)nsecs * __kmpc_impl_get_wtick();
}
/// Mask of the currently active (converged) lanes of the calling warp, via
/// the PTX activemask.b32 instruction. Restored the missing local
/// declaration, return statement, and closing brace lost in truncation.
EXTERN __kmpc_impl_lanemask_t __kmpc_impl_activemask() {
  unsigned int Mask;
  asm volatile("activemask.b32 %0;" : "=r"(Mask));
  return Mask;
}
/// Full thread-block barrier with a memory fence (PTX barrier.sync).
EXTERN void __kmpc_impl_syncthreads() {
  // barrier.sync 0 is the semantics of __syncthreads(). NOTE(review): the
  // barrier id line was lost in the truncated source - confirm against
  // upstream (named barrier 1 is used by __kmpc_impl_named_sync below).
  int barrier = 0;
  asm volatile("barrier.sync %0;"
               :
               : "r"(barrier)
               : "memory");
}
/// Synchronize the lanes named in \p Mask within the calling warp
/// (bar.warp.sync). Restored missing closing brace lost in truncation.
EXTERN void __kmpc_impl_syncwarp(__kmpc_impl_lanemask_t Mask) {
  __nvvm_bar_warp_sync(Mask);
}
// NVPTX specific kernel initialization
EXTERN void __kmpc_impl_target_init() { /* nvptx needs no extra setup */
}
// Barrier until num_threads arrive.
EXTERN void __kmpc_impl_named_sync(uint32_t num_threads) {
  // The named barrier for active parallel threads of a team in an L1 parallel
  // region to synchronize with each other. Named barrier 1 is reserved for
  // this purpose (barrier 0 carries __syncthreads semantics). Restored the
  // missing barrier declaration, "memory" clobber, and closing brace.
  int barrier = 1;
  asm volatile("barrier.sync %0, %1;"
               :
               : "r"(barrier), "r"(num_threads)
               : "memory");
}
/// Device-scope memory fence (PTX membar.gl via the __nvvm_membar_gl builtin).
EXTERN void __kmpc_impl_threadfence() {
  __nvvm_membar_gl();
}
/// Block(CTA)-scope memory fence (PTX membar.cta).
EXTERN void __kmpc_impl_threadfence_block() {
  __nvvm_membar_cta();
}
/// System-scope memory fence (PTX membar.sys).
EXTERN void __kmpc_impl_threadfence_system() {
  __nvvm_membar_sys();
}
// Calls to the NVPTX layer (assuming 1D layout)
/// Thread id within the block, x-dimension only (1D layout assumed).
/// Restored missing closing brace lost in truncation.
EXTERN int __kmpc_get_hardware_thread_id_in_block() {
  return __nvvm_read_ptx_sreg_tid_x();
}
/// 1-D index of the calling thread's block (CTA) within the kernel grid.
EXTERN int GetBlockIdInKernel() {
  return __nvvm_read_ptx_sreg_ctaid_x();
}
/// Number of blocks in the kernel grid, x-dimension only (1D layout assumed).
/// Restored missing closing brace lost in truncation.
EXTERN int __kmpc_get_hardware_num_blocks() {
  return __nvvm_read_ptx_sreg_nctaid_x();
}
/// Number of threads per block, x-dimension only (1D layout assumed).
/// Restored missing closing brace lost in truncation.
EXTERN int __kmpc_get_hardware_num_threads_in_block() {
  return __nvvm_read_ptx_sreg_ntid_x();
}
/// Number of lanes per warp; WARPSIZE is provided by the target headers.
EXTERN unsigned __kmpc_get_warp_size() {
  return WARPSIZE;
}
/// Warp index of the calling thread within its block. Restored missing
/// closing brace lost in truncation.
EXTERN unsigned GetWarpId() {
  return __kmpc_get_hardware_thread_id_in_block() / WARPSIZE;
}
/// Lane index of the calling thread within its warp; the mask form assumes
/// WARPSIZE is a power of two. Restored missing closing brace.
EXTERN unsigned GetLaneId() {
  return __kmpc_get_hardware_thread_id_in_block() & (WARPSIZE - 1);
}
/// Atomically add \p Val to *Address; returns the previous value.
/// Restored missing closing brace lost in truncation.
uint32_t __kmpc_atomic_add(uint32_t *Address, uint32_t Val) {
  return __atomic_fetch_add(Address, Val, __ATOMIC_SEQ_CST);
}
/// Atomic wrapping increment (PTX atom.inc, cf. CUDA atomicInc):
///   *Address = (*Address >= Val) ? 0 : *Address + 1
/// Returns the previous value. Restored missing closing brace.
uint32_t __kmpc_atomic_inc(uint32_t *Address, uint32_t Val) {
  return __nvvm_atom_inc_gen_ui(Address, Val);
}
/// Atomically store max(*Address, Val) into *Address; returns the previous
/// value. Restored missing closing brace lost in truncation.
uint32_t __kmpc_atomic_max(uint32_t *Address, uint32_t Val) {
  return __atomic_fetch_max(Address, Val, __ATOMIC_SEQ_CST);
}
/// Atomically store \p Val into *Address; returns the previous value.
/// Restored the missing result declaration, return, and closing brace.
uint32_t __kmpc_atomic_exchange(uint32_t *Address, uint32_t Val) {
  uint32_t R;
  __atomic_exchange(Address, &Val, &R, __ATOMIC_SEQ_CST);
  return R;
}
/// Atomic compare-and-swap: if *Address == Compare, store Val. Returns the
/// value observed at *Address (equals \p Compare on success, since
/// __atomic_compare_exchange writes the observed value back into Compare on
/// failure). Restored the missing return statement and closing brace.
uint32_t __kmpc_atomic_cas(uint32_t *Address, uint32_t Compare, uint32_t Val) {
  (void)__atomic_compare_exchange(Address, &Compare, &Val, false,
                                  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
  return Compare;
}
/// 64-bit overload: atomically store \p Val into *Address; returns the
/// previous value. Restored the missing return statement and closing brace.
unsigned long long __kmpc_atomic_exchange(unsigned long long *Address,
                                          unsigned long long Val) {
  unsigned long long R;
  __atomic_exchange(Address, &Val, &R, __ATOMIC_SEQ_CST);
  return R;
}
/// 64-bit overload: atomically add \p Val to *Address; returns the previous
/// value. Restored missing closing brace lost in truncation.
unsigned long long __kmpc_atomic_add(unsigned long long *Address,
                                     unsigned long long Val) {
  return __atomic_fetch_add(Address, Val, __ATOMIC_SEQ_CST);
}
// Spin-wait budget (clock cycles) and the two states of an omp_lock_t.
// UNSET/SET were lost in truncation but are used by the lock routines below
// (__kmpc_atomic_cas(lock, UNSET, SET), __kmpc_atomic_exchange(lock, UNSET)).
#define __OMP_SPIN 1000
#define UNSET 0u
#define SET 1u
/// Initialize a lock by forcing it into the UNSET state.
/// Restored missing closing brace lost in truncation.
EXTERN void __kmpc_impl_init_lock(omp_lock_t *lock) {
  __kmpc_impl_unset_lock(lock);
}
/// Destroy a lock; there is no resource to release, so just reset it.
/// Restored missing closing brace lost in truncation.
EXTERN void __kmpc_impl_destroy_lock(omp_lock_t *lock) {
  __kmpc_impl_unset_lock(lock);
}
/// Acquire the lock: CAS from UNSET to SET, backing off between attempts by
/// spinning on the SM clock (%clock wraps at 2^32, hence the wrap-adjusted
/// cycle count). Restored the missing `now` declaration, inner busy-wait
/// loop, and closing braces lost in truncation.
EXTERN void __kmpc_impl_set_lock(omp_lock_t *lock) {
  // TODO: not sure spinning is a good idea here..
  while (__kmpc_atomic_cas(lock, UNSET, SET) != UNSET) {
    int32_t start = __nvvm_read_ptx_sreg_clock();
    int32_t now;
    for (;;) {
      now = __nvvm_read_ptx_sreg_clock();
      int32_t cycles = now > start ? now - start : now + (0xffffffff - start);
      if (cycles >= __OMP_SPIN * GetBlockIdInKernel()) {
        break;
      }
    }
  } // wait for 0 to be the read value
}
/// Release the lock with an atomic exchange to UNSET (previous value is
/// discarded). Restored missing closing brace lost in truncation.
EXTERN void __kmpc_impl_unset_lock(omp_lock_t *lock) {
  (void)__kmpc_atomic_exchange(lock, UNSET);
}
/// Read the lock state atomically (add of 0 returns the current value
/// without modifying it). Restored missing closing brace lost in truncation.
EXTERN int __kmpc_impl_test_lock(omp_lock_t *lock) {
  return __kmpc_atomic_add(lock, 0u);
}
// Forward declarations of the device-side heap allocator and vprintf
// provided by the CUDA device runtime. Restored the extern "C" wrapper and
// the free() declaration (used by __kmpc_impl_free below) lost in truncation.
extern "C" {
void *malloc(size_t);
void free(void *);
int32_t vprintf(const char *, void *);
}
/// Device-side allocation, forwarded to the device runtime's malloc.
EXTERN void *__kmpc_impl_malloc(size_t x) {
  return malloc(x);
}
/// Release memory previously obtained from __kmpc_impl_malloc.
EXTERN void __kmpc_impl_free(void *x) {
  free(x);
}
/// Forward an OpenMP printf request to the CUDA device vprintf. The third
/// (unnamed) parameter is accepted for interface compatibility but unused on
/// NVPTX. Restored the missing signature continuation and closing brace.
EXTERN int32_t __llvm_omp_vprintf(const char *Format, void *Arguments,
                                  uint32_t) {
  return vprintf(Format, Arguments);
}

#pragma omp end declare target