//===-------- Interface.h - OpenMP interface ---------------------- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
//
//===----------------------------------------------------------------------===//

#ifndef OMPTARGET_DEVICERTL_INTERFACE_H
#define OMPTARGET_DEVICERTL_INTERFACE_H

#include "Types.h"

/// External API
///
///{

extern "C" {

/// ICV: dyn-var, constant 0
///
/// setter: ignored.
/// getter: returns 0.
///
///{
void omp_set_dynamic(int);
int omp_get_dynamic(void);
///}
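
/// Illustrative sketch (not part of this interface): since dyn-var is the
/// constant 0 on the device, the setter above is a no-op and the getter
/// always yields 0.
///
/// \code
///   omp_set_dynamic(1);          // ignored on the device
///   int Dyn = omp_get_dynamic(); // always 0
/// \endcode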

/// ICV: nthreads-var, integer
///
/// scope: data environment
///
/// setter: ignored.
/// getter: returns false.
///
/// implementation notes:
///
///
///{
void omp_set_num_threads(int);
int omp_get_max_threads(void);
///}

/// ICV: thread-limit-var, computed
///
/// getter: returns the thread limit defined during launch.
///
///{
int omp_get_thread_limit(void);
///}

/// ICV: max-active-level-var, constant 1
///
/// setter: ignored.
/// getter: returns 1.
///
///{
void omp_set_max_active_levels(int);
int omp_get_max_active_levels(void);
///}

/// ICV: places-partition-var
///
///
///{
///}

/// ICV: active-level-var, 0 or 1
///
/// getter: returns 0 or 1.
///
///{
int omp_get_active_level(void);
///}

/// ICV: level-var
///
/// getter: returns the parallel region nesting level.
83 ///
84 ///{
85 int omp_get_level(void);
86 ///}
88 /// ICV: run-sched-var
89 ///
90 ///
91 ///{
92 void omp_set_schedule(omp_sched_t, int);
93 void omp_get_schedule(omp_sched_t *, int *);
94 ///}
96 /// TODO this is incomplete.
97 int omp_get_num_threads(void);
98 int omp_get_thread_num(void);
99 void omp_set_nested(int);
101 int omp_get_nested(void);
103 void omp_set_max_active_levels(int Level);
105 int omp_get_max_active_levels(void);
107 omp_proc_bind_t omp_get_proc_bind(void);
109 int omp_get_num_places(void);
111 int omp_get_place_num_procs(int place_num);
113 void omp_get_place_proc_ids(int place_num, int *ids);
115 int omp_get_place_num(void);
117 int omp_get_partition_num_places(void);
119 void omp_get_partition_place_nums(int *place_nums);
121 int omp_get_cancellation(void);
123 void omp_set_default_device(int deviceId);
125 int omp_get_default_device(void);
127 int omp_get_num_devices(void);
129 int omp_get_num_teams(void);
131 int omp_get_team_num();
133 int omp_get_initial_device(void);
135 void *llvm_omp_get_dynamic_shared();
137 /// Synchronization
139 ///{
140 void omp_init_lock(omp_lock_t *Lock);
142 void omp_destroy_lock(omp_lock_t *Lock);
144 void omp_set_lock(omp_lock_t *Lock);
146 void omp_unset_lock(omp_lock_t *Lock);
148 int omp_test_lock(omp_lock_t *Lock);
149 ///}
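
/// Illustrative sketch (not part of this interface) of the usual OpenMP lock
/// protocol applied on the device; the guarded work is a placeholder:
///
/// \code
///   omp_lock_t Lock;
///   omp_init_lock(&Lock);    // once, before first use
///   omp_set_lock(&Lock);     // blocks until the lock is acquired
///   // ... exclusive work ...
///   omp_unset_lock(&Lock);   // release so other threads can enter
///   omp_destroy_lock(&Lock); // once, after last use
/// \endcode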

/// Tasking
///
///{
int omp_in_final(void);

int omp_get_max_task_priority(void);
///}

/// Misc
///
///{
double omp_get_wtick(void);

double omp_get_wtime(void);
///}
}

extern "C" {
/// Allocate \p Bytes in "shareable" memory and return the address. Needs to be
/// called balanced with __kmpc_free_shared like a stack (push/pop). Can be
/// called by any thread, allocation happens *per thread*.
void *__kmpc_alloc_shared(uint64_t Bytes);

/// Deallocate \p Ptr. Needs to be called balanced with __kmpc_alloc_shared like
/// a stack (push/pop). Can be called by any thread. \p Ptr has to have been
/// allocated by __kmpc_alloc_shared by the same thread.
void __kmpc_free_shared(void *Ptr, uint64_t Bytes);
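
/// Illustrative sketch (not part of this interface) of the stack discipline
/// required by the two functions above; the 64-byte size is arbitrary:
///
/// \code
///   void *Buf = __kmpc_alloc_shared(64); // push: per-thread allocation
///   // ... use Buf ...
///   __kmpc_free_shared(Buf, 64);         // pop: same pointer, same size,
///                                        //      freed by the same thread
/// \endcode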

/// Get a pointer to the memory buffer containing dynamically allocated shared
/// memory configured at launch.
void *__kmpc_get_dynamic_shared();
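
/// Illustrative sketch (not part of this interface); how much dynamic shared
/// memory is available is a launch-time property, so any size assumed by the
/// caller is just that, an assumption:
///
/// \code
///   char *DynBuf = static_cast<char *>(__kmpc_get_dynamic_shared());
///   // DynBuf points at the launch-configured dynamic shared memory buffer.
/// \endcode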

/// Allocate sufficient space for \p NumArgs sequential `void*` and store the
/// allocation address in \p GlobalArgs.
///
/// Called by the main thread prior to a parallel region.
///
/// We also remember it in GlobalArgsPtr to ensure the worker threads and
/// deallocation function know the allocation address too.
void __kmpc_begin_sharing_variables(void ***GlobalArgs, uint64_t NumArgs);

/// Deallocate the memory allocated by __kmpc_begin_sharing_variables.
///
/// Called by the main thread after a parallel region.
void __kmpc_end_sharing_variables();

/// Store the allocation address obtained via __kmpc_begin_sharing_variables in
/// \p GlobalArgs.
///
/// Called by the worker threads in the parallel region (function).
void __kmpc_get_shared_variables(void ***GlobalArgs);
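
/// Illustrative sketch (not part of this interface) of the sharing protocol
/// described above; the two shared values are hypothetical:
///
/// \code
///   // Main thread, before the parallel region:
///   void **Args;
///   __kmpc_begin_sharing_variables(&Args, /*NumArgs=*/2);
///   Args[0] = &SomeInt;   // hypothetical values to share
///   Args[1] = &SomeFloat;
///
///   // Worker threads, inside the parallel region function:
///   void **SharedArgs;
///   __kmpc_get_shared_variables(&SharedArgs);
///   int *IntPtr = static_cast<int *>(SharedArgs[0]);
///
///   // Main thread, after the parallel region:
///   __kmpc_end_sharing_variables();
/// \endcode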

/// External interface to get the thread ID.
uint32_t __kmpc_get_hardware_thread_id_in_block();

/// External interface to get the number of threads.
uint32_t __kmpc_get_hardware_num_threads_in_block();

/// External interface to get the warp size.
uint32_t __kmpc_get_warp_size();

/// Kernel
///
///{
int8_t __kmpc_is_spmd_exec_mode();

int32_t __kmpc_target_init(IdentTy *Ident, int8_t Mode,
                           bool UseGenericStateMachine, bool);

void __kmpc_target_deinit(IdentTy *Ident, int8_t Mode, bool);
///}
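
/// Illustrative sketch (not part of this interface) of how a generated kernel
/// might drive the entry points above; the parameter labels and the assumption
/// that threads which should execute the user code see a return value of -1
/// mirror common clang codegen but are not guaranteed by this header:
///
/// \code
///   if (__kmpc_target_init(Ident, Mode, /*UseGenericStateMachine=*/true,
///                          /*RequiresFullRuntime=*/true) == -1) {
///     // ... user code of the target region ...
///     __kmpc_target_deinit(Ident, Mode, /*RequiresFullRuntime=*/true);
///   }
///   // Other threads either served the generic-mode state machine or exit.
/// \endcode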

/// Reduction
///
///{
void __kmpc_nvptx_end_reduce(int32_t TId);

void __kmpc_nvptx_end_reduce_nowait(int32_t TId);

int32_t __kmpc_nvptx_parallel_reduce_nowait_v2(
    IdentTy *Loc, int32_t TId, int32_t num_vars, uint64_t reduce_size,
    void *reduce_data, ShuffleReductFnTy shflFct, InterWarpCopyFnTy cpyFct);

int32_t __kmpc_nvptx_teams_reduce_nowait_v2(
    IdentTy *Loc, int32_t TId, void *GlobalBuffer, uint32_t num_of_records,
    void *reduce_data, ShuffleReductFnTy shflFct, InterWarpCopyFnTy cpyFct,
    ListGlobalFnTy lgcpyFct, ListGlobalFnTy lgredFct, ListGlobalFnTy glcpyFct,
    ListGlobalFnTy glredFct);
///}

/// Synchronization
///
///{
void __kmpc_ordered(IdentTy *Loc, int32_t TId);

void __kmpc_end_ordered(IdentTy *Loc, int32_t TId);

int32_t __kmpc_cancel_barrier(IdentTy *Loc_ref, int32_t TId);

void __kmpc_barrier(IdentTy *Loc_ref, int32_t TId);

void __kmpc_barrier_simple_spmd(IdentTy *Loc_ref, int32_t TId);

void __kmpc_barrier_simple_generic(IdentTy *Loc_ref, int32_t TId);

int32_t __kmpc_master(IdentTy *Loc, int32_t TId);

void __kmpc_end_master(IdentTy *Loc, int32_t TId);

int32_t __kmpc_single(IdentTy *Loc, int32_t TId);

void __kmpc_end_single(IdentTy *Loc, int32_t TId);

void __kmpc_flush(IdentTy *Loc);

uint64_t __kmpc_warp_active_thread_mask(void);

void __kmpc_syncwarp(uint64_t Mask);

void __kmpc_critical(IdentTy *Loc, int32_t TId, CriticalNameTy *Name);

void __kmpc_end_critical(IdentTy *Loc, int32_t TId, CriticalNameTy *Name);
///}

/// Parallelism
///
///{
/// TODO
void __kmpc_kernel_prepare_parallel(ParallelRegionFnTy WorkFn);

/// TODO
bool __kmpc_kernel_parallel(ParallelRegionFnTy *WorkFn);

/// TODO
void __kmpc_kernel_end_parallel();

/// TODO
void __kmpc_push_proc_bind(IdentTy *Loc, uint32_t TId, int ProcBind);

/// TODO
void __kmpc_push_num_teams(IdentTy *Loc, int32_t TId, int32_t NumTeams,
                           int32_t ThreadLimit);

/// TODO
uint16_t __kmpc_parallel_level(IdentTy *Loc, uint32_t);
///}

/// Tasking
///
///{
TaskDescriptorTy *__kmpc_omp_task_alloc(IdentTy *, uint32_t, int32_t,
                                        uint32_t TaskSizeInclPrivateValues,
                                        uint32_t SharedValuesSize,
                                        TaskFnTy TaskFn);

int32_t __kmpc_omp_task(IdentTy *Loc, uint32_t TId,
                        TaskDescriptorTy *TaskDescriptor);

int32_t __kmpc_omp_task_with_deps(IdentTy *Loc, uint32_t TId,
                                  TaskDescriptorTy *TaskDescriptor, int32_t,
                                  void *, int32_t, void *);

void __kmpc_omp_task_begin_if0(IdentTy *Loc, uint32_t TId,
                               TaskDescriptorTy *TaskDescriptor);

void __kmpc_omp_task_complete_if0(IdentTy *Loc, uint32_t TId,
                                  TaskDescriptorTy *TaskDescriptor);

void __kmpc_omp_wait_deps(IdentTy *Loc, uint32_t TId, int32_t, void *, int32_t,
                          void *);

void __kmpc_taskgroup(IdentTy *Loc, uint32_t TId);

void __kmpc_end_taskgroup(IdentTy *Loc, uint32_t TId);

int32_t __kmpc_omp_taskyield(IdentTy *Loc, uint32_t TId, int);

int32_t __kmpc_omp_taskwait(IdentTy *Loc, uint32_t TId);

void __kmpc_taskloop(IdentTy *Loc, uint32_t TId,
                     TaskDescriptorTy *TaskDescriptor, int,
                     uint64_t *LowerBound, uint64_t *UpperBound, int64_t, int,
                     int32_t, uint64_t, void *);
///}

/// Misc
///
///{
int32_t __kmpc_cancellationpoint(IdentTy *Loc, int32_t TId, int32_t CancelVal);

int32_t __kmpc_cancel(IdentTy *Loc, int32_t TId, int32_t CancelVal);
///}

/// Shuffle
///
///{
int32_t __kmpc_shuffle_int32(int32_t val, int16_t delta, int16_t size);
int64_t __kmpc_shuffle_int64(int64_t val, int16_t delta, int16_t size);
///}
}

#endif