//===-- nvptxintrin.h - NVPTX intrinsic functions -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef __NVPTXINTRIN_H
#define __NVPTXINTRIN_H

#ifndef __NVPTX__
#error "This file is intended for NVPTX targets or offloading to NVPTX"
#endif

#include <stdint.h>

#if !defined(__cplusplus)
_Pragma("push_macro(\"bool\")");
#define bool _Bool
#endif

_Pragma("omp begin declare target device_type(nohost)");
_Pragma("omp begin declare variant match(device = {arch(nvptx64)})");

// Type aliases to the address spaces used by the NVPTX backend.
#define __gpu_private __attribute__((address_space(5)))
#define __gpu_constant __attribute__((address_space(4)))
#define __gpu_local __attribute__((address_space(3)))
#define __gpu_global __attribute__((address_space(1)))
#define __gpu_generic __attribute__((address_space(0)))
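
// Illustrative sketch, not part of this header's interface: the macros above
// expand to plain address space qualifiers, so they can annotate pointers to
// memory known to live in a particular space. The '__example_' name is
// hypothetical.
_DEFAULT_FN_ATTRS static __inline__ void
__example_store_local(uint32_t __gpu_local *__ptr, uint32_t __v) {
  // Stores through a pointer qualified as CUDA 'shared' (block-local) memory.
  *__ptr = __v;
}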

// Attribute to declare a function as a kernel.
#define __gpu_kernel __attribute__((nvptx_kernel, visibility("protected")))

// Returns the number of CUDA blocks in the 'x' dimension.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_blocks_x(void) {
  return __nvvm_read_ptx_sreg_nctaid_x();
}

// Returns the number of CUDA blocks in the 'y' dimension.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_blocks_y(void) {
  return __nvvm_read_ptx_sreg_nctaid_y();
}

// Returns the number of CUDA blocks in the 'z' dimension.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_blocks_z(void) {
  return __nvvm_read_ptx_sreg_nctaid_z();
}

// Returns the 'x' dimension of the current CUDA block's id.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_block_id_x(void) {
  return __nvvm_read_ptx_sreg_ctaid_x();
}

// Returns the 'y' dimension of the current CUDA block's id.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_block_id_y(void) {
  return __nvvm_read_ptx_sreg_ctaid_y();
}

// Returns the 'z' dimension of the current CUDA block's id.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_block_id_z(void) {
  return __nvvm_read_ptx_sreg_ctaid_z();
}

// Returns the number of CUDA threads in the 'x' dimension.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_threads_x(void) {
  return __nvvm_read_ptx_sreg_ntid_x();
}

// Returns the number of CUDA threads in the 'y' dimension.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_threads_y(void) {
  return __nvvm_read_ptx_sreg_ntid_y();
}

// Returns the number of CUDA threads in the 'z' dimension.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_threads_z(void) {
  return __nvvm_read_ptx_sreg_ntid_z();
}

// Returns the 'x' dimension id of the thread in the current CUDA block.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_thread_id_x(void) {
  return __nvvm_read_ptx_sreg_tid_x();
}

// Returns the 'y' dimension id of the thread in the current CUDA block.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_thread_id_y(void) {
  return __nvvm_read_ptx_sreg_tid_y();
}

// Returns the 'z' dimension id of the thread in the current CUDA block.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_thread_id_z(void) {
  return __nvvm_read_ptx_sreg_tid_z();
}
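
// Illustrative sketch (hypothetical name, not part of this header): the block
// and thread accessors above compose into a flattened global id in the 'x'
// dimension, the usual starting point for grid-stride indexing.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __example_global_id_x(void) {
  // Each preceding block contributes __gpu_num_threads_x() threads.
  return __gpu_block_id_x() * __gpu_num_threads_x() + __gpu_thread_id_x();
}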

// Returns the size of a CUDA warp, always 32 on NVIDIA hardware.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_lanes(void) {
  return __nvvm_read_ptx_sreg_warpsize();
}

// Returns the id of the calling thread within its CUDA warp.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_lane_id(void) {
  return __nvvm_read_ptx_sreg_laneid();
}

// Returns the bit-mask of active threads in the current warp.
_DEFAULT_FN_ATTRS static __inline__ uint64_t __gpu_lane_mask(void) {
  return __nvvm_activemask();
}
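
// Illustrative sketch (hypothetical name, not part of this header): combining
// __gpu_lane_id() with __gpu_lane_mask() identifies the lowest active lane,
// e.g. to elect a single thread in the warp to perform a side effect.
_DEFAULT_FN_ATTRS static __inline__ bool __example_is_first_lane(void) {
  uint64_t __mask = __gpu_lane_mask();
  // The first set bit in the active mask is the lowest live lane id.
  return __gpu_lane_id() == (uint32_t)(__builtin_ffsll(__mask) - 1);
}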

// Copies the value from the first active thread in the warp to the rest.
_DEFAULT_FN_ATTRS static __inline__ uint32_t
__gpu_read_first_lane_u32(uint64_t __lane_mask, uint32_t __x) {
  uint32_t __mask = (uint32_t)__lane_mask;
  uint32_t __id = __builtin_ffs(__mask) - 1;
  return __nvvm_shfl_sync_idx_i32(__mask, __x, __id, __gpu_num_lanes() - 1);
}

// Copies the value from the first active thread in the warp to the rest.
_DEFAULT_FN_ATTRS static __inline__ uint64_t
__gpu_read_first_lane_u64(uint64_t __lane_mask, uint64_t __x) {
  uint32_t __hi = (uint32_t)(__x >> 32ull);
  uint32_t __lo = (uint32_t)(__x & 0xFFFFFFFF);
  uint32_t __mask = (uint32_t)__lane_mask;
  uint32_t __id = __builtin_ffs(__mask) - 1;
  return ((uint64_t)__nvvm_shfl_sync_idx_i32(__mask, __hi, __id,
                                             __gpu_num_lanes() - 1)
          << 32ull) |
         ((uint64_t)__nvvm_shfl_sync_idx_i32(__mask, __lo, __id,
                                             __gpu_num_lanes() - 1));
}
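
// Illustrative sketch (hypothetical name, not part of this header): pointers
// are 64 bits wide on nvptx64, so __gpu_read_first_lane_u64 can broadcast one
// lane's pointer to the whole warp, a common pattern for sharing a per-warp
// buffer obtained by a single lane.
_DEFAULT_FN_ATTRS static __inline__ void *
__example_broadcast_ptr(uint64_t __lane_mask, void *__ptr) {
  // Every active lane receives the first active lane's copy of the pointer.
  return (void *)__gpu_read_first_lane_u64(__lane_mask, (uint64_t)__ptr);
}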

// Returns a bitmask of the threads in the current warp for which \p x is true.
_DEFAULT_FN_ATTRS static __inline__ uint64_t __gpu_ballot(uint64_t __lane_mask,
                                                          bool __x) {
  uint32_t __mask = (uint32_t)__lane_mask;
  return __nvvm_vote_ballot_sync(__mask, __x);
}
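
// Illustrative sketch (hypothetical name, not part of this header): counting
// how many active lanes satisfy a predicate by population-counting the ballot.
_DEFAULT_FN_ATTRS static __inline__ uint32_t
__example_count_votes(uint64_t __lane_mask, bool __x) {
  // Each set bit in the ballot is one lane that voted true.
  return (uint32_t)__builtin_popcountll(__gpu_ballot(__lane_mask, __x));
}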

// Waits for all the threads in the block to converge and issues a fence.
_DEFAULT_FN_ATTRS static __inline__ void __gpu_sync_threads(void) {
  __syncthreads();
}

// Waits for all threads in the warp to reconverge for independent scheduling.
_DEFAULT_FN_ATTRS static __inline__ void __gpu_sync_lane(uint64_t __lane_mask) {
  __nvvm_bar_warp_sync((uint32_t)__lane_mask);
}

// Shuffles the lanes inside the warp according to the given index.
_DEFAULT_FN_ATTRS static __inline__ uint32_t
__gpu_shuffle_idx_u32(uint64_t __lane_mask, uint32_t __idx, uint32_t __x) {
  uint32_t __mask = (uint32_t)__lane_mask;
  return __nvvm_shfl_sync_idx_i32(__mask, __x, __idx, __gpu_num_lanes() - 1u);
}

// Shuffles the lanes inside the warp according to the given index.
_DEFAULT_FN_ATTRS static __inline__ uint64_t
__gpu_shuffle_idx_u64(uint64_t __lane_mask, uint32_t __idx, uint64_t __x) {
  uint32_t __hi = (uint32_t)(__x >> 32ull);
  uint32_t __lo = (uint32_t)(__x & 0xFFFFFFFF);
  uint32_t __mask = (uint32_t)__lane_mask;
  return ((uint64_t)__nvvm_shfl_sync_idx_i32(__mask, __hi, __idx,
                                             __gpu_num_lanes() - 1u)
          << 32ull) |
         ((uint64_t)__nvvm_shfl_sync_idx_i32(__mask, __lo, __idx,
                                             __gpu_num_lanes() - 1u));
}
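
// Illustrative sketch (hypothetical name, not part of this header): an index
// shuffle can permute values arbitrarily within the warp; here every lane
// reads the value held by its mirror lane.
_DEFAULT_FN_ATTRS static __inline__ uint32_t
__example_reverse_lanes(uint64_t __lane_mask, uint32_t __x) {
  // Lane 0 trades with lane 31, lane 1 with lane 30, and so on.
  uint32_t __mirror = __gpu_num_lanes() - 1u - __gpu_lane_id();
  return __gpu_shuffle_idx_u32(__lane_mask, __mirror, __x);
}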

// Returns true if the flat pointer points to CUDA 'shared' memory.
_DEFAULT_FN_ATTRS static __inline__ bool __gpu_is_ptr_local(void *__ptr) {
  return __nvvm_isspacep_shared(__ptr);
}

// Returns true if the flat pointer points to CUDA 'local' memory.
_DEFAULT_FN_ATTRS static __inline__ bool __gpu_is_ptr_private(void *__ptr) {
  return __nvvm_isspacep_local(__ptr);
}

// Terminates execution of the calling thread.
_DEFAULT_FN_ATTRS [[noreturn]] static __inline__ void __gpu_exit(void) {
  __nvvm_exit();
}

// Suspends the thread briefly to assist the scheduler during busy loops.
_DEFAULT_FN_ATTRS static __inline__ void __gpu_thread_suspend(void) {
  if (__nvvm_reflect("__CUDA_ARCH") >= 700)
    asm("nanosleep.u32 64;" ::: "memory");
}
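
// Illustrative sketch (hypothetical names, not part of this header): polling
// a flag in global memory, yielding between reads so the scheduler can make
// progress; on architectures before sm_70 the suspend is simply a no-op.
_DEFAULT_FN_ATTRS static __inline__ void
__example_spin_wait(volatile uint32_t __gpu_global *__flag) {
  while (!*__flag)
    __gpu_thread_suspend();
}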

_Pragma("omp end declare variant");
_Pragma("omp end declare target");

#if !defined(__cplusplus)
_Pragma("pop_macro(\"bool\")");
#endif

#endif // __NVPTXINTRIN_H