/*
 * kmp_gsupport.cpp
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_atomic.h"

#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

enum {
  KMP_GOMP_TASK_UNTIED_FLAG = 1,
  KMP_GOMP_TASK_FINAL_FLAG = 2,
  KMP_GOMP_TASK_DEPENDS_FLAG = 8
};

// This class helps convert gomp dependency info into
// kmp_depend_info_t structures
class kmp_gomp_depends_info_t {
  void **depend;
  kmp_int32 num_deps;
  size_t num_out, num_mutexinout, num_in;
  size_t offset;

public:
  kmp_gomp_depends_info_t(void **depend) : depend(depend) {
    size_t ndeps = (kmp_intptr_t)depend[0];
    size_t num_doable;
    // GOMP taskdep structure:
    // if depend[0] != 0:
    //   depend = [ ndeps | nout | &out | ... | &out | &in | ... | &in ]
    //
    // if depend[0] == 0:
    //   depend = [ 0 | ndeps | nout | nmtx | nin | &out | ... | &out | &mtx |
    //              ... | &mtx | &in | ... | &in | &depobj | ... | &depobj ]
    if (ndeps) {
      num_out = (kmp_intptr_t)depend[1];
      num_in = ndeps - num_out;
      num_mutexinout = 0;
      num_doable = ndeps;
      offset = 2;
    } else {
      ndeps = (kmp_intptr_t)depend[1];
      num_out = (kmp_intptr_t)depend[2];
      num_mutexinout = (kmp_intptr_t)depend[3];
      num_in = (kmp_intptr_t)depend[4];
      num_doable = num_out + num_mutexinout + num_in;
      offset = 5;
    }
    // TODO: Support gomp depobj
    if (ndeps != num_doable) {
      KMP_FATAL(GompFeatureNotSupported, "depobj");
    }
    num_deps = static_cast<kmp_int32>(ndeps);
  }
  kmp_int32 get_num_deps() const { return num_deps; }
  kmp_depend_info_t get_kmp_depend(size_t index) const {
    kmp_depend_info_t retval;
    memset(&retval, '\0', sizeof(retval));
    KMP_ASSERT(index < (size_t)num_deps);
    retval.base_addr = (kmp_intptr_t)depend[offset + index];
    retval.len = 0;
    // Because inout and out are logically equivalent,
    // use inout and in dependency flags. GOMP does not provide a
    // way to distinguish if the user specified out vs. inout.
    if (index < num_out) {
      retval.flags.in = 1;
      retval.flags.out = 1;
    } else if (index >= num_out && index < (num_out + num_mutexinout)) {
      retval.flags.mtx = 1;
    } else {
      retval.flags.in = 1;
    }
    return retval;
  }
};
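
// Example (illustrative only, not part of the runtime): for a task with
// depend(out: a) depend(in: b, c), the GNU codegen would pass the first
// encoding above, i.e. depend = { (void *)3, (void *)1, &a, &b, &c }.
// get_kmp_depend(0) then yields an inout dependence on &a, while
// get_kmp_depend(1) and get_kmp_depend(2) yield 'in' dependences on &b and &c.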

#ifdef __cplusplus
extern "C" {
#endif // __cplusplus

#define MKLOC(loc, routine) \
  static ident_t loc = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;"};

#include "kmp_ftn_os.h"

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_BARRIER)(void) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_barrier");
  KA_TRACE(20, ("GOMP_barrier: T#%d\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmpc_barrier(&loc, gtid);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = ompt_data_none;
  }
#endif
}

// Mutual exclusion

// The symbol that icc/ifort generates for unnamed critical sections
// - .gomp_critical_user_ - is defined using .comm in any object that
// references it. We can't reference it directly here in C code, as the
// symbol contains a ".".
//
// The RTL contains an assembly language definition of .gomp_critical_user_
// with another symbol __kmp_unnamed_critical_addr initialized with its
// address.
extern kmp_critical_name *__kmp_unnamed_critical_addr;

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_START)(void) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_critical_start");
  KA_TRACE(20, ("GOMP_critical_start: T#%d\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmpc_critical(&loc, gtid, __kmp_unnamed_critical_addr);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_END)(void) {
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_critical_end");
  KA_TRACE(20, ("GOMP_critical_end: T#%d\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmpc_end_critical(&loc, gtid, __kmp_unnamed_critical_addr);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_NAME_START)(void **pptr) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_critical_name_start");
  KA_TRACE(20, ("GOMP_critical_name_start: T#%d\n", gtid));
  __kmpc_critical(&loc, gtid, (kmp_critical_name *)pptr);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_NAME_END)(void **pptr) {
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_critical_name_end");
  KA_TRACE(20, ("GOMP_critical_name_end: T#%d\n", gtid));
  __kmpc_end_critical(&loc, gtid, (kmp_critical_name *)pptr);
}
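
// Illustrative sketch (not part of the runtime) of the sequence the GNU
// codegen emits for  #pragma omp critical(tag) :
//
//   static void *gomp_critical_tag;   // hypothetical per-name lock word
//   GOMP_critical_name_start(&gomp_critical_tag);
//   ... guarded region ...
//   GOMP_critical_name_end(&gomp_critical_tag);
//
// Unnamed critical sections use GOMP_critical_start()/GOMP_critical_end()
// and share the single .gomp_critical_user_ lock described above.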

// The Gnu codegen tries to use locked operations to perform atomic updates
// inline. If it can't, then it calls GOMP_atomic_start() before performing
// the update and GOMP_atomic_end() afterward, regardless of the data type.
void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ATOMIC_START)(void) {
  int gtid = __kmp_entry_gtid();
  KA_TRACE(20, ("GOMP_atomic_start: T#%d\n", gtid));

#if OMPT_SUPPORT
  __ompt_thread_assign_wait_id(0);
#endif

  __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ATOMIC_END)(void) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_atomic_end: T#%d\n", gtid));
  __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);
}
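
// E.g. (illustrative): for  #pragma omp atomic  on a type with no lock-free
// update path, such as a long double on some targets, the generated code
// falls back to
//
//   GOMP_atomic_start();
//   x += 1.0L; // hypothetical update
//   GOMP_atomic_end();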

int KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_START)(void) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_single_start");
  KA_TRACE(20, ("GOMP_single_start: T#%d\n", gtid));

  if (!TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();
  __kmp_resume_if_soft_paused();

  // 3rd parameter == FALSE prevents kmp_enter_single from pushing a
  // workshare when USE_CHECKS is defined. We need to avoid the push,
  // as there is no corresponding GOMP_single_end() call.
  kmp_int32 rc = __kmp_enter_single(gtid, &loc, FALSE);

#if OMPT_SUPPORT && OMPT_OPTIONAL
  kmp_info_t *this_thr = __kmp_threads[gtid];
  kmp_team_t *team = this_thr->th.th_team;
  int tid = __kmp_tid_from_gtid(gtid);

  if (ompt_enabled.enabled) {
    if (rc) {
      if (ompt_enabled.ompt_callback_work) {
        ompt_callbacks.ompt_callback(ompt_callback_work)(
            ompt_work_single_executor, ompt_scope_begin,
            &(team->t.ompt_team_info.parallel_data),
            &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
            1, OMPT_GET_RETURN_ADDRESS(0));
      }
    } else {
      if (ompt_enabled.ompt_callback_work) {
        ompt_callbacks.ompt_callback(ompt_callback_work)(
            ompt_work_single_other, ompt_scope_begin,
            &(team->t.ompt_team_info.parallel_data),
            &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
            1, OMPT_GET_RETURN_ADDRESS(0));
        ompt_callbacks.ompt_callback(ompt_callback_work)(
            ompt_work_single_other, ompt_scope_end,
            &(team->t.ompt_team_info.parallel_data),
            &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
            1, OMPT_GET_RETURN_ADDRESS(0));
      }
    }
  }
#endif

  return rc;
}

void *KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_COPY_START)(void) {
  void *retval;
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_single_copy_start");
  KA_TRACE(20, ("GOMP_single_copy_start: T#%d\n", gtid));

  if (!TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();
  __kmp_resume_if_soft_paused();

  // If this is the first thread to enter, return NULL. The generated code will
  // then call GOMP_single_copy_end() for this thread only, with the
  // copyprivate data pointer as an argument.
  if (__kmp_enter_single(gtid, &loc, FALSE))
    return NULL;

  // Wait for the first thread to set the copyprivate data pointer,
  // and for all other threads to reach this point.

#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

  // Retrieve the value of the copyprivate data pointer, and wait for all
  // threads to do likewise, then return.
  retval = __kmp_team_from_gtid(gtid)->t.t_copypriv_data;
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = ompt_data_none;
  }
#endif
  return retval;
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_COPY_END)(void *data) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_single_copy_end: T#%d\n", gtid));

  // Set the copyprivate data pointer for the team, then hit the barrier so
  // that the other threads will continue on and read it. Hit another barrier
  // before continuing, so that we know that the copyprivate data pointer has
  // been propagated to all threads before trying to reuse the t_copypriv_data
  // field.
  __kmp_team_from_gtid(gtid)->t.t_copypriv_data = data;
#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = ompt_data_none;
  }
#endif
}
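
// Illustrative lowering of  #pragma omp single copyprivate(x)  (a sketch;
// 'x' and 'tmp' are hypothetical):
//
//   void *tmp = GOMP_single_copy_start();
//   if (tmp == NULL) {
//     x = ...;                  // the chosen thread runs the region
//     GOMP_single_copy_end(&x); // publish the copyprivate pointer
//   } else {
//     x = *(long *)tmp;         // other threads copy the value out
//   }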

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ORDERED_START)(void) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_ordered_start");
  KA_TRACE(20, ("GOMP_ordered_start: T#%d\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmpc_ordered(&loc, gtid);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ORDERED_END)(void) {
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_ordered_end");
  KA_TRACE(20, ("GOMP_ordered_end: T#%d\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmpc_end_ordered(&loc, gtid);
}

// Dispatch macro defs
//
// They come in two flavors: 64-bit unsigned, and either 32-bit signed
// (IA-32 architecture) or 64-bit signed (Intel(R) 64).

#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS
#define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_4
#define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_4
#define KMP_DISPATCH_NEXT __kmpc_dispatch_next_4
#else
#define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_8
#define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_8
#define KMP_DISPATCH_NEXT __kmpc_dispatch_next_8
#endif /* KMP_ARCH_X86 */

#define KMP_DISPATCH_INIT_ULL __kmp_aux_dispatch_init_8u
#define KMP_DISPATCH_FINI_CHUNK_ULL __kmp_aux_dispatch_fini_chunk_8u
#define KMP_DISPATCH_NEXT_ULL __kmpc_dispatch_next_8u

// The parallel construct

#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
    void
    __kmp_GOMP_microtask_wrapper(int *gtid, int *npr, void (*task)(void *),
                                 void *data) {
#if OMPT_SUPPORT
  kmp_info_t *thr;
  ompt_frame_t *ompt_frame;
  ompt_state_t enclosing_state;

  if (ompt_enabled.enabled) {
    // get pointer to thread data structure
    thr = __kmp_threads[*gtid];

    // save enclosing task state; set current state for task
    enclosing_state = thr->th.ompt_thread_info.state;
    thr->th.ompt_thread_info.state = ompt_state_work_parallel;

    // set task frame
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
  }
#endif

  task(data);

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    // clear task frame
    ompt_frame->exit_frame = ompt_data_none;

    // restore enclosing state
    thr->th.ompt_thread_info.state = enclosing_state;
  }
#endif
}

#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
    void
    __kmp_GOMP_parallel_microtask_wrapper(int *gtid, int *npr,
                                          void (*task)(void *), void *data,
                                          unsigned num_threads, ident_t *loc,
                                          enum sched_type schedule, long start,
                                          long end, long incr,
                                          long chunk_size) {
  // Initialize the loop worksharing construct.

  KMP_DISPATCH_INIT(loc, *gtid, schedule, start, end, incr, chunk_size,
                    schedule != kmp_sch_static);

#if OMPT_SUPPORT
  kmp_info_t *thr;
  ompt_frame_t *ompt_frame;
  ompt_state_t enclosing_state;

  if (ompt_enabled.enabled) {
    thr = __kmp_threads[*gtid];
    // save enclosing task state; set current state for task
    enclosing_state = thr->th.ompt_thread_info.state;
    thr->th.ompt_thread_info.state = ompt_state_work_parallel;

    // set task frame
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
  }
#endif

  // Now invoke the microtask.
  task(data);

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    // clear task frame
    ompt_frame->exit_frame = ompt_data_none;

    // reset enclosing state
    thr->th.ompt_thread_info.state = enclosing_state;
  }
#endif
}

static void __kmp_GOMP_fork_call(ident_t *loc, int gtid, unsigned num_threads,
                                 unsigned flags, void (*unwrapped_task)(void *),
                                 microtask_t wrapper, int argc, ...) {
  int rc;
  kmp_info_t *thr = __kmp_threads[gtid];
  kmp_team_t *team = thr->th.th_team;
  int tid = __kmp_tid_from_gtid(gtid);

  va_list ap;
  va_start(ap, argc);

  if (num_threads != 0)
    __kmp_push_num_threads(loc, gtid, num_threads);
  if (flags != 0)
    __kmp_push_proc_bind(loc, gtid, (kmp_proc_bind_t)flags);
  rc = __kmp_fork_call(loc, gtid, fork_context_gnu, argc, wrapper,
                       __kmp_invoke_task_func, kmp_va_addr_of(ap));

  va_end(ap);

  if (rc) {
    __kmp_run_before_invoked_task(gtid, tid, thr, team);
  }

#if OMPT_SUPPORT
  int ompt_team_size;
  if (ompt_enabled.enabled) {
    ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
    ompt_task_info_t *task_info = __ompt_get_task_info_object(0);

    // implicit task callback
    if (ompt_enabled.ompt_callback_implicit_task) {
      ompt_team_size = __kmp_team_from_gtid(gtid)->t.t_nproc;
      ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
          ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), ompt_team_size, __kmp_tid_from_gtid(gtid),
          ompt_task_implicit); // TODO: Can this be ompt_task_initial?
      task_info->thread_num = __kmp_tid_from_gtid(gtid);
    }
    thr->th.ompt_thread_info.state = ompt_state_work_parallel;
  }
#endif
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_START)(void (*task)(void *),
                                                       void *data,
                                                       unsigned num_threads) {
  int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
  ompt_frame_t *parent_frame, *frame;

  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL);
    parent_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif

  MKLOC(loc, "GOMP_parallel_start");
  KA_TRACE(20, ("GOMP_parallel_start: T#%d\n", gtid));
  __kmp_GOMP_fork_call(&loc, gtid, num_threads, 0u, task,
                       (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task,
                       data);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &frame, NULL, NULL);
    frame->exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
  }
#endif
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)(void) {
  int gtid = __kmp_get_gtid();
  kmp_info_t *thr;

  thr = __kmp_threads[gtid];

  MKLOC(loc, "GOMP_parallel_end");
  KA_TRACE(20, ("GOMP_parallel_end: T#%d\n", gtid));

  if (!thr->th.th_team->t.t_serialized) {
    __kmp_run_after_invoked_task(gtid, __kmp_tid_from_gtid(gtid), thr,
                                 thr->th.th_team);
  }
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    // The implicit task is finished here; in the barrier we might schedule
    // deferred tasks, which don't see the implicit task on the stack.
    OMPT_CUR_TASK_INFO(thr)->frame.exit_frame = ompt_data_none;
  }
#endif

  __kmp_join_call(&loc, gtid
#if OMPT_SUPPORT
                  ,
                  fork_context_gnu
#endif
  );
}

// Loop worksharing constructs

// The Gnu codegen passes in an exclusive upper bound for the overall range,
// but the libguide dispatch code expects an inclusive upper bound, hence the
// "end - incr" 5th argument to KMP_DISPATCH_INIT (and the "ub - str" 11th
// argument to __kmp_GOMP_fork_call).
//
// Conversely, KMP_DISPATCH_NEXT returns an inclusive upper bound in *p_ub,
// but the Gnu codegen expects an exclusive upper bound, so the adjustment
// "*p_ub += stride" compensates for the discrepancy.
//
// Correction: the gnu codegen always adjusts the upper bound by +-1, not the
// stride value. We adjust the dispatch parameters accordingly (by +-1), but
// we still adjust p_ub by the actual stride value.
//
// The "runtime" versions do not take a chunk_sz parameter.
//
// The profile lib cannot support construct checking of unordered loops that
// are predetermined by the compiler to be statically scheduled, as the gcc
// codegen will not always emit calls to GOMP_loop_static_next() to get the
// next iteration. Instead, it emits inline code to call omp_get_thread_num()
// and calculate the iteration space using the result. It doesn't do this
// with ordered static loops, so they can be checked.
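
// Illustrative sketch (not part of the runtime) of the calling sequence the
// GNU codegen emits for  #pragma omp for schedule(dynamic, 2)  over [0, n)
// with stride 1:
//
//   long lb, ub;
//   if (GOMP_loop_dynamic_start(0, n, 1, 2, &lb, &ub)) {
//     do {
//       for (long i = lb; i < ub; i++) // note the exclusive upper bound
//         body(i);
//     } while (GOMP_loop_dynamic_next(&lb, &ub));
//   }
//   GOMP_loop_end();
//
// The macros below convert the exclusive [lb, ub) range to the inclusive
// bounds the dispatch layer expects, then convert back on the way out.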

#if OMPT_SUPPORT
#define IF_OMPT_SUPPORT(code) code
#else
#define IF_OMPT_SUPPORT(code)
#endif

#define LOOP_START(func, schedule) \
  int func(long lb, long ub, long str, long chunk_sz, long *p_lb, \
           long *p_ub) { \
    int status; \
    long stride; \
    int gtid = __kmp_entry_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
         gtid, lb, ub, str, chunk_sz)); \
 \
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
      KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                        (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                        (schedule) != kmp_sch_static); \
      IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
      status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                                 (kmp_int *)p_ub, (kmp_int *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
 \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    return status; \
  }

#define LOOP_RUNTIME_START(func, schedule) \
  int func(long lb, long ub, long str, long *p_lb, long *p_ub) { \
    int status; \
    long stride; \
    long chunk_sz = 0; \
    int gtid = __kmp_entry_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz %ld\n", \
         gtid, lb, ub, str, chunk_sz)); \
 \
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
      KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                        (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, TRUE); \
      IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
      status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                                 (kmp_int *)p_ub, (kmp_int *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
 \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    return status; \
  }

#define KMP_DOACROSS_FINI(status, gtid) \
  if (!status && __kmp_threads[gtid]->th.th_dispatch->th_doacross_flags) { \
    __kmpc_doacross_fini(NULL, gtid); \
  }

#define LOOP_NEXT(func, fini_code) \
  int func(long *p_lb, long *p_ub) { \
    int status; \
    long stride; \
    int gtid = __kmp_get_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
    KA_TRACE(20, (KMP_STR(func) ": T#%d\n", gtid)); \
 \
    IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
    fini_code status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                                         (kmp_int *)p_ub, (kmp_int *)&stride); \
    if (status) { \
      *p_ub += (stride > 0) ? 1 : -1; \
    } \
    KMP_DOACROSS_FINI(status, gtid) \
 \
    KA_TRACE( \
        20, \
        (KMP_STR(func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, stride 0x%lx, " \
                       "returning %d\n", \
         gtid, *p_lb, *p_ub, stride, status)); \
    return status; \
  }

LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_STATIC_START), kmp_sch_static)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT), {})
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START),
           kmp_sch_dynamic_chunked)
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_START),
           kmp_sch_dynamic_chunked)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT), {})
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_NEXT), {})
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_GUIDED_START),
           kmp_sch_guided_chunked)
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_START),
           kmp_sch_guided_chunked)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT), {})
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_NEXT), {})
LOOP_RUNTIME_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_RUNTIME_START),
                   kmp_sch_runtime)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT), {})
LOOP_RUNTIME_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_START),
    kmp_sch_runtime)
LOOP_RUNTIME_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_RUNTIME_START),
    kmp_sch_runtime)
LOOP_NEXT(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_NEXT), {})
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_RUNTIME_NEXT), {})

LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START),
           kmp_ord_static)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT),
          { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START),
           kmp_ord_dynamic_chunked)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT),
          { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START),
           kmp_ord_guided_chunked)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT),
          { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_RUNTIME_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START),
    kmp_ord_runtime)
LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT),
          { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })

#define LOOP_DOACROSS_START(func, schedule) \
  bool func(unsigned ncounts, long *counts, long chunk_sz, long *p_lb, \
            long *p_ub) { \
    int status; \
    long stride, lb, ub, str; \
    int gtid = __kmp_entry_gtid(); \
    struct kmp_dim *dims = \
        (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
    MKLOC(loc, KMP_STR(func)); \
    for (unsigned i = 0; i < ncounts; ++i) { \
      dims[i].lo = 0; \
      dims[i].up = counts[i] - 1; \
      dims[i].st = 1; \
    } \
    __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \
    lb = 0; \
    ub = counts[0]; \
    str = 1; \
    KA_TRACE(20, (KMP_STR(func) ": T#%d, ncounts %u, lb 0x%lx, ub 0x%lx, str " \
                                "0x%lx, chunk_sz 0x%lx\n", \
                  gtid, ncounts, lb, ub, str, chunk_sz)); \
 \
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                        (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                        (schedule) != kmp_sch_static); \
      status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                                 (kmp_int *)p_ub, (kmp_int *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
    KMP_DOACROSS_FINI(status, gtid); \
 \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    __kmp_free(dims); \
    return status; \
  }

#define LOOP_DOACROSS_RUNTIME_START(func, schedule) \
  int func(unsigned ncounts, long *counts, long *p_lb, long *p_ub) { \
    int status; \
    long stride, lb, ub, str; \
    long chunk_sz = 0; \
    int gtid = __kmp_entry_gtid(); \
    struct kmp_dim *dims = \
        (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
    MKLOC(loc, KMP_STR(func)); \
    for (unsigned i = 0; i < ncounts; ++i) { \
      dims[i].lo = 0; \
      dims[i].up = counts[i] - 1; \
      dims[i].st = 1; \
    } \
    __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \
    lb = 0; \
    ub = counts[0]; \
    str = 1; \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz %ld\n", \
         gtid, lb, ub, str, chunk_sz)); \
 \
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                        (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, TRUE); \
      status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                                 (kmp_int *)p_ub, (kmp_int *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
    KMP_DOACROSS_FINI(status, gtid); \
 \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    __kmp_free(dims); \
    return status; \
  }

LOOP_DOACROSS_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_STATIC_START),
    kmp_sch_static)
LOOP_DOACROSS_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_DYNAMIC_START),
    kmp_sch_dynamic_chunked)
LOOP_DOACROSS_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_GUIDED_START),
    kmp_sch_guided_chunked)
LOOP_DOACROSS_RUNTIME_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_RUNTIME_START),
    kmp_sch_runtime)

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END)(void) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_loop_end: T#%d\n", gtid))

#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = ompt_data_none;
  }
#endif

  KA_TRACE(20, ("GOMP_loop_end exit: T#%d\n", gtid))
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END_NOWAIT)(void) {
  KA_TRACE(20, ("GOMP_loop_end_nowait: T#%d\n", __kmp_get_gtid()))
}

// Unsigned long long loop worksharing constructs
//
// These are new with gcc 4.4

#define LOOP_START_ULL(func, schedule) \
  int func(int up, unsigned long long lb, unsigned long long ub, \
           unsigned long long str, unsigned long long chunk_sz, \
           unsigned long long *p_lb, unsigned long long *p_ub) { \
    int status; \
    long long str2 = up ? ((long long)str) : -((long long)str); \
    long long stride; \
    int gtid = __kmp_entry_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
 \
    KA_TRACE(20, (KMP_STR(func) ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str " \
                                "0x%llx, chunk_sz 0x%llx\n", \
                  gtid, up, lb, ub, str, chunk_sz)); \
 \
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
                            (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, \
                            (schedule) != kmp_sch_static); \
      status = \
          KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
                                (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str2); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
 \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    return status; \
  }

#define LOOP_RUNTIME_START_ULL(func, schedule) \
  int func(int up, unsigned long long lb, unsigned long long ub, \
           unsigned long long str, unsigned long long *p_lb, \
           unsigned long long *p_ub) { \
    int status; \
    long long str2 = up ? ((long long)str) : -((long long)str); \
    unsigned long long stride; \
    unsigned long long chunk_sz = 0; \
    int gtid = __kmp_entry_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
 \
    KA_TRACE(20, (KMP_STR(func) ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str " \
                                "0x%llx, chunk_sz 0x%llx\n", \
                  gtid, up, lb, ub, str, chunk_sz)); \
 \
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
                            (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, \
                            TRUE); \
      status = \
          KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
                                (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT((long long)stride == str2); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
 \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    return status; \
  }

#define LOOP_NEXT_ULL(func, fini_code) \
  int func(unsigned long long *p_lb, unsigned long long *p_ub) { \
    int status; \
    long long stride; \
    int gtid = __kmp_get_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
    KA_TRACE(20, (KMP_STR(func) ": T#%d\n", gtid)); \
 \
    fini_code status = \
        KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
                              (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
    if (status) { \
      *p_ub += (stride > 0) ? 1 : -1; \
    } \
 \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, stride 0x%llx, " \
                   "returning %d\n", \
         gtid, *p_lb, *p_ub, stride, status)); \
    return status; \
  }

LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START),
               kmp_sch_static)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT), {})
LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START),
               kmp_sch_dynamic_chunked)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT), {})
LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START),
               kmp_sch_guided_chunked)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT), {})
LOOP_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_START),
    kmp_sch_dynamic_chunked)
LOOP_NEXT_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_NEXT), {})
LOOP_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_START),
    kmp_sch_guided_chunked)
LOOP_NEXT_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_NEXT), {})
LOOP_RUNTIME_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START), kmp_sch_runtime)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT), {})
LOOP_RUNTIME_START_ULL(
    KMP_EXPAND_NAME(
        KMP_API_NAME_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_START),
    kmp_sch_runtime)
LOOP_RUNTIME_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_START),
    kmp_sch_runtime)
LOOP_NEXT_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_NEXT),
    {})
LOOP_NEXT_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_NEXT), {})

LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START),
               kmp_ord_static)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT),
              { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START),
    kmp_ord_dynamic_chunked)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT),
              { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START),
               kmp_ord_guided_chunked)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT),
              { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_RUNTIME_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START),
    kmp_ord_runtime)
LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT),
              { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })

#define LOOP_DOACROSS_START_ULL(func, schedule) \
  int func(unsigned ncounts, unsigned long long *counts, \
           unsigned long long chunk_sz, unsigned long long *p_lb, \
           unsigned long long *p_ub) { \
    int status; \
    long long stride, str, lb, ub; \
    int gtid = __kmp_entry_gtid(); \
    struct kmp_dim *dims = \
        (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
    MKLOC(loc, KMP_STR(func)); \
    for (unsigned i = 0; i < ncounts; ++i) { \
      dims[i].lo = 0; \
      dims[i].up = counts[i] - 1; \
      dims[i].st = 1; \
    } \
    __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \
    lb = 0; \
    ub = counts[0]; \
    str = 1; \
 \
    KA_TRACE(20, (KMP_STR(func) ": T#%d, lb 0x%llx, ub 0x%llx, str " \
                                "0x%llx, chunk_sz 0x%llx\n", \
                  gtid, lb, ub, str, chunk_sz)); \
 \
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
                            (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                            (schedule) != kmp_sch_static); \
      status = \
          KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
                                (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
    KMP_DOACROSS_FINI(status, gtid); \
 \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    __kmp_free(dims); \
    return status; \
  }

#define LOOP_DOACROSS_RUNTIME_START_ULL(func, schedule) \
  int func(unsigned ncounts, unsigned long long *counts, \
           unsigned long long *p_lb, unsigned long long *p_ub) { \
    int status; \
    unsigned long long stride, str, lb, ub; \
    unsigned long long chunk_sz = 0; \
    int gtid = __kmp_entry_gtid(); \
    struct kmp_dim *dims = \
        (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \
    MKLOC(loc, KMP_STR(func)); \
    for (unsigned i = 0; i < ncounts; ++i) { \
      dims[i].lo = 0; \
      dims[i].up = counts[i] - 1; \
      dims[i].st = 1; \
    } \
    __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \
    lb = 0; \
    ub = counts[0]; \
    str = 1; \
    KA_TRACE(20, (KMP_STR(func) ": T#%d, lb 0x%llx, ub 0x%llx, str " \
                                "0x%llx, chunk_sz 0x%llx\n", \
                  gtid, lb, ub, str, chunk_sz)); \
 \
    if ((str > 0) ? (lb < ub) : (lb > ub)) { \
      KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
                            (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                            TRUE); \
      status = \
          KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
                                (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
      if (status) { \
        KMP_DEBUG_ASSERT(stride == str); \
        *p_ub += (str > 0) ? 1 : -1; \
      } \
    } else { \
      status = 0; \
    } \
    KMP_DOACROSS_FINI(status, gtid); \
 \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
         gtid, *p_lb, *p_ub, status)); \
    __kmp_free(dims); \
    return status; \
  }

LOOP_DOACROSS_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_STATIC_START),
    kmp_sch_static)
LOOP_DOACROSS_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_DYNAMIC_START),
    kmp_sch_dynamic_chunked)
LOOP_DOACROSS_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_GUIDED_START),
    kmp_sch_guided_chunked)
LOOP_DOACROSS_RUNTIME_START_ULL(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_RUNTIME_START),
    kmp_sch_runtime)

// Combined parallel / loop worksharing constructs
//
// There are no ull versions (yet).
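
// Illustrative sketch (hypothetical values) of the pre-GCC-4.9 split API for
//   #pragma omp parallel for schedule(static)
// over [0, n):
//
//   GOMP_parallel_loop_static_start(body_fn, &data, 0 /*num_threads*/,
//                                   0 /*lb*/, n /*ub*/, 1 /*str*/,
//                                   0 /*chunk_sz*/);
//   body_fn(&data); // the encountering thread joins in the loop's work
//   GOMP_parallel_end();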

#define PARALLEL_LOOP_START(func, schedule, ompt_pre, ompt_post) \
  void func(void (*task)(void *), void *data, unsigned num_threads, long lb, \
            long ub, long str, long chunk_sz) { \
    int gtid = __kmp_entry_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
         gtid, lb, ub, str, chunk_sz)); \
 \
    ompt_pre(); \
 \
    __kmp_GOMP_fork_call(&loc, gtid, num_threads, 0u, task, \
                         (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, \
                         9, task, data, num_threads, &loc, (schedule), lb, \
                         (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \
    IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid)); \
 \
    KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                      (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                      (schedule) != kmp_sch_static); \
 \
    ompt_post(); \
 \
    KA_TRACE(20, (KMP_STR(func) " exit: T#%d\n", gtid)); \
  }

#if OMPT_SUPPORT && OMPT_OPTIONAL

#define OMPT_LOOP_PRE() \
  ompt_frame_t *parent_frame; \
  if (ompt_enabled.enabled) { \
    __ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL); \
    parent_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); \
    OMPT_STORE_RETURN_ADDRESS(gtid); \
  }

#define OMPT_LOOP_POST() \
  if (ompt_enabled.enabled) { \
    parent_frame->enter_frame = ompt_data_none; \
  }

#else

#define OMPT_LOOP_PRE()

#define OMPT_LOOP_POST()

#endif

PARALLEL_LOOP_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START),
    kmp_sch_static, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START),
    kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START),
    kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START),
    kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST)

// Tasking constructs

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASK)(void (*func)(void *), void *data,
                                             void (*copy_func)(void *, void *),
                                             long arg_size, long arg_align,
                                             bool if_cond, unsigned gomp_flags,
                                             void **depend) {
  MKLOC(loc, "GOMP_task");
  int gtid = __kmp_entry_gtid();
  kmp_int32 flags = 0;
  kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags;

  KA_TRACE(20, ("GOMP_task: T#%d\n", gtid));

  // The low-order bit is the "untied" flag
  if (!(gomp_flags & KMP_GOMP_TASK_UNTIED_FLAG)) {
    input_flags->tiedness = 1;
  }
  // The second low-order bit is the "final" flag
  if (gomp_flags & KMP_GOMP_TASK_FINAL_FLAG) {
    input_flags->final = 1;
  }
  input_flags->native = 1;
  // __kmp_task_alloc() sets up all other flags

  if (!if_cond) {
    arg_size = 0;
  }

  kmp_task_t *task = __kmp_task_alloc(
      &loc, gtid, input_flags, sizeof(kmp_task_t),
      arg_size ? arg_size + arg_align - 1 : 0, (kmp_routine_entry_t)func);

  if (arg_size > 0) {
    if (arg_align > 0) {
      task->shareds = (void *)((((size_t)task->shareds) + arg_align - 1) /
                               arg_align * arg_align);
    }
    // else error??

    if (copy_func) {
      (*copy_func)(task->shareds, data);
    } else {
      KMP_MEMCPY(task->shareds, data, arg_size);
    }
  }

#if OMPT_SUPPORT
  kmp_taskdata_t *current_task;
  if (ompt_enabled.enabled) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
    current_task = __kmp_threads[gtid]->th.th_current_task;
    current_task->ompt_task_info.frame.enter_frame.ptr =
        OMPT_GET_FRAME_ADDRESS(0);
  }
#endif

  if (if_cond) {
    if (gomp_flags & KMP_GOMP_TASK_DEPENDS_FLAG) {
      KMP_ASSERT(depend);
      kmp_gomp_depends_info_t gomp_depends(depend);
      kmp_int32 ndeps = gomp_depends.get_num_deps();
      kmp_depend_info_t dep_list[ndeps];
      for (kmp_int32 i = 0; i < ndeps; i++)
        dep_list[i] = gomp_depends.get_kmp_depend(i);
      __kmpc_omp_task_with_deps(&loc, gtid, task, ndeps, dep_list, 0, NULL);
    } else {
      __kmpc_omp_task(&loc, gtid, task);
    }
  } else {
#if OMPT_SUPPORT
    ompt_thread_info_t oldInfo;
    kmp_info_t *thread;
    kmp_taskdata_t *taskdata;
    if (ompt_enabled.enabled) {
      // Store the thread's state and restore it after the task
      thread = __kmp_threads[gtid];
      taskdata = KMP_TASK_TO_TASKDATA(task);
      oldInfo = thread->th.ompt_thread_info;
      thread->th.ompt_thread_info.wait_id = 0;
      thread->th.ompt_thread_info.state = ompt_state_work_parallel;
      taskdata->ompt_task_info.frame.exit_frame.ptr =
          OMPT_GET_FRAME_ADDRESS(0);
      OMPT_STORE_RETURN_ADDRESS(gtid);
    }
#endif
    if (gomp_flags & KMP_GOMP_TASK_DEPENDS_FLAG) {
      KMP_ASSERT(depend);
      kmp_gomp_depends_info_t gomp_depends(depend);
      kmp_int32 ndeps = gomp_depends.get_num_deps();
      kmp_depend_info_t dep_list[ndeps];
      for (kmp_int32 i = 0; i < ndeps; i++)
        dep_list[i] = gomp_depends.get_kmp_depend(i);
      __kmpc_omp_wait_deps(&loc, gtid, ndeps, dep_list, 0, NULL);
    }

    __kmpc_omp_task_begin_if0(&loc, gtid, task);
    func(data);
    __kmpc_omp_task_complete_if0(&loc, gtid, task);

#if OMPT_SUPPORT
    if (ompt_enabled.enabled) {
      thread->th.ompt_thread_info = oldInfo;
      taskdata->ompt_task_info.frame.exit_frame = ompt_data_none;
    }
#endif
  }
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
  }
#endif

  KA_TRACE(20, ("GOMP_task exit: T#%d\n", gtid));
}
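
// Illustrative sketch of the lowering of  #pragma omp task depend(out: x)
// against this entry point (argument values hypothetical):
//
//   GOMP_task(task_fn, &args, NULL /*copy_func*/, sizeof(args),
//             __alignof__(args), true /*if_cond*/,
//             KMP_GOMP_TASK_DEPENDS_FLAG, depend_vec);
//
// where depend_vec follows the GOMP taskdep layout documented at the top of
// this file.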

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKWAIT)(void) {
  MKLOC(loc, "GOMP_taskwait");
  int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
  if (ompt_enabled.enabled)
    OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

  KA_TRACE(20, ("GOMP_taskwait: T#%d\n", gtid));

  __kmpc_omp_taskwait(&loc, gtid);

  KA_TRACE(20, ("GOMP_taskwait exit: T#%d\n", gtid));
}

// Sections worksharing constructs
//
// For the sections construct, we initialize a dynamically scheduled loop
// worksharing construct with lb 1 and stride 1, and use the iteration #'s
// that it returns as the section ids.
//
// There are no special entry points for ordered sections, so we always use
// the dynamically scheduled workshare, even if the sections aren't ordered.
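
// Illustrative sketch of the sequence the GNU codegen emits for
//   #pragma omp sections  with two sections:
//
//   for (unsigned id = GOMP_sections_start(2); id != 0;
//        id = GOMP_sections_next()) {
//     switch (id) {
//     case 1: /* first section */ break;
//     case 2: /* second section */ break;
//     }
//   }
//   GOMP_sections_end();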

unsigned KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_START)(unsigned count) {
  int status;
  kmp_int lb, ub, stride;
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_sections_start");
  KA_TRACE(20, ("GOMP_sections_start: T#%d\n", gtid));

  KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

  status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
  if (status) {
    KMP_DEBUG_ASSERT(stride == 1);
    KMP_DEBUG_ASSERT(lb > 0);
    KMP_ASSERT(lb == ub);
  } else {
    lb = 0;
  }

  KA_TRACE(20, ("GOMP_sections_start exit: T#%d returning %u\n", gtid,
                (unsigned)lb));
  return (unsigned)lb;
}

unsigned KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_NEXT)(void) {
  int status;
  kmp_int lb, ub, stride;
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_sections_next");
  KA_TRACE(20, ("GOMP_sections_next: T#%d\n", gtid));

#if OMPT_SUPPORT
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

  status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
  if (status) {
    KMP_DEBUG_ASSERT(stride == 1);
    KMP_DEBUG_ASSERT(lb > 0);
    KMP_ASSERT(lb == ub);
  } else {
    lb = 0;
  }

  KA_TRACE(
      20, ("GOMP_sections_next exit: T#%d returning %u\n", gtid, (unsigned)lb));
  return (unsigned)lb;
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START)(
    void (*task)(void *), void *data, unsigned num_threads, unsigned count) {
  int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
  ompt_frame_t *parent_frame;

  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL);
    parent_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif

  MKLOC(loc, "GOMP_parallel_sections_start");
  KA_TRACE(20, ("GOMP_parallel_sections_start: T#%d\n", gtid));

  __kmp_GOMP_fork_call(&loc, gtid, num_threads, 0u, task,
                       (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9,
                       task, data, num_threads, &loc, kmp_nm_dynamic_chunked,
                       (kmp_int)1, (kmp_int)count, (kmp_int)1, (kmp_int)1);

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    parent_frame->enter_frame = ompt_data_none;
  }
#endif

  KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

  KA_TRACE(20, ("GOMP_parallel_sections_start exit: T#%d\n", gtid));
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END)(void) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_sections_end: T#%d\n", gtid))

#if OMPT_SUPPORT
  ompt_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = ompt_data_none;
  }
#endif

  KA_TRACE(20, ("GOMP_sections_end exit: T#%d\n", gtid))
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT)(void) {
  KA_TRACE(20, ("GOMP_sections_end_nowait: T#%d\n", __kmp_get_gtid()))
}

// libgomp has an empty function for GOMP_taskyield as of 2013-10-10
void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKYIELD)(void) {
  KA_TRACE(20, ("GOMP_taskyield: T#%d\n", __kmp_get_gtid()))
  return;
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL)(void (*task)(void *),
                                                 void *data,
                                                 unsigned num_threads,
                                                 unsigned int flags) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_parallel");
  KA_TRACE(20, ("GOMP_parallel: T#%d\n", gtid));

#if OMPT_SUPPORT
  ompt_task_info_t *parent_task_info, *task_info;
  if (ompt_enabled.enabled) {
    parent_task_info = __ompt_get_task_info_object(0);
    parent_task_info->frame.enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  __kmp_GOMP_fork_call(&loc, gtid, num_threads, flags, task,
                       (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task,
                       data);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    task_info = __ompt_get_task_info_object(0);
    task_info->frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
  }
#endif
  task(data);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)();
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    task_info->frame.exit_frame = ompt_data_none;
    parent_task_info->frame.enter_frame = ompt_data_none;
  }
#endif
}
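
// Illustrative lowering of  #pragma omp parallel  (sketch; outlined_fn is the
// compiler-generated body):
//
//   GOMP_parallel(outlined_fn, &shared_data, 0 /*num_threads*/, 0 /*flags*/);
//
// Unlike the older GOMP_parallel_start()/GOMP_parallel_end() pair, this entry
// point also runs the body on the encountering thread and joins the team
// before returning, as the definition above shows.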

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_SECTIONS)(void (*task)(void *),
                                                          void *data,
                                                          unsigned num_threads,
                                                          unsigned count,
                                                          unsigned flags) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_parallel_sections");
  KA_TRACE(20, ("GOMP_parallel_sections: T#%d\n", gtid));

#if OMPT_SUPPORT
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

  __kmp_GOMP_fork_call(&loc, gtid, num_threads, flags, task,
                       (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9,
                       task, data, num_threads, &loc, kmp_nm_dynamic_chunked,
                       (kmp_int)1, (kmp_int)count, (kmp_int)1, (kmp_int)1);

#if OMPT_SUPPORT
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

  KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

  task(data);
  KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)();
  KA_TRACE(20, ("GOMP_parallel_sections exit: T#%d\n", gtid));
}

#define PARALLEL_LOOP(func, schedule, ompt_pre, ompt_post) \
  void func(void (*task)(void *), void *data, unsigned num_threads, long lb, \
            long ub, long str, long chunk_sz, unsigned flags) { \
    int gtid = __kmp_entry_gtid(); \
    MKLOC(loc, KMP_STR(func)); \
    KA_TRACE( \
        20, \
        (KMP_STR( \
             func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
         gtid, lb, ub, str, chunk_sz)); \
 \
    ompt_pre(); \
    __kmp_GOMP_fork_call(&loc, gtid, num_threads, flags, task, \
                         (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, \
                         9, task, data, num_threads, &loc, (schedule), lb, \
                         (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \
 \
    IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \
    KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                      (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                      (schedule) != kmp_sch_static); \
    task(data); \
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)(); \
    ompt_post(); \
 \
    KA_TRACE(20, (KMP_STR(func) " exit: T#%d\n", gtid)); \
  }

PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC),
              kmp_sch_static, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC),
              kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_GUIDED),
    kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_DYNAMIC),
    kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED),
              kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME),
              kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_MAYBE_NONMONOTONIC_RUNTIME),
    kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP(
    KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_RUNTIME),
    kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST)

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKGROUP_START)(void) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_taskgroup_start");
  KA_TRACE(20, ("GOMP_taskgroup_start: T#%d\n", gtid));

#if OMPT_SUPPORT
  if (ompt_enabled.enabled)
    OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

  __kmpc_taskgroup(&loc, gtid);

  return;
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKGROUP_END)(void) {
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_taskgroup_end");
  KA_TRACE(20, ("GOMP_taskgroup_end: T#%d\n", gtid));

#if OMPT_SUPPORT
  if (ompt_enabled.enabled)
    OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

  __kmpc_end_taskgroup(&loc, gtid);

  return;
}

static kmp_int32 __kmp_gomp_to_omp_cancellation_kind(int gomp_kind) {
  kmp_int32 cncl_kind = 0;
  switch (gomp_kind) {
  case 1:
    cncl_kind = cancel_parallel;
    break;
  case 2:
    cncl_kind = cancel_loop;
    break;
  case 4:
    cncl_kind = cancel_sections;
    break;
  case 8:
    cncl_kind = cancel_taskgroup;
    break;
  }
  return cncl_kind;
}
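
// E.g. (illustrative): GOMP_cancel(1 /*parallel*/, true) below maps to
// __kmpc_cancel(&loc, gtid, cancel_parallel), while do_cancel == false turns
// the call into a cancellation point only.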

// Return true if cancellation should take place, false otherwise
bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CANCELLATION_POINT)(int which) {
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_cancellation_point");
  KA_TRACE(20, ("GOMP_cancellation_point: T#%d which:%d\n", gtid, which));
  kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);
  return __kmpc_cancellationpoint(&loc, gtid, cncl_kind);
}

// Return true if cancellation should take place, false otherwise
bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CANCEL)(int which, bool do_cancel) {
  int gtid = __kmp_get_gtid();
  MKLOC(loc, "GOMP_cancel");
  KA_TRACE(20, ("GOMP_cancel: T#%d which:%d do_cancel:%d\n", gtid, which,
                (int)do_cancel));
  kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);

  if (do_cancel == FALSE) {
    return __kmpc_cancellationpoint(&loc, gtid, cncl_kind);
  } else {
    return __kmpc_cancel(&loc, gtid, cncl_kind);
  }
}

// Return true if cancellation should take place, false otherwise
bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_BARRIER_CANCEL)(void) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_barrier_cancel: T#%d\n", gtid));
  return __kmp_barrier_gomp_cancel(gtid);
}

// Return true if cancellation should take place, false otherwise
bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL)(void) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_sections_end_cancel: T#%d\n", gtid));
  return __kmp_barrier_gomp_cancel(gtid);
}

// Return true if cancellation should take place, false otherwise
bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END_CANCEL)(void) {
  int gtid = __kmp_get_gtid();
  KA_TRACE(20, ("GOMP_loop_end_cancel: T#%d\n", gtid));
  return __kmp_barrier_gomp_cancel(gtid);
}

// All target functions are empty as of 2014-05-29
void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET)(int device, void (*fn)(void *),
                                               const void *openmp_target,
                                               size_t mapnum, void **hostaddrs,
                                               size_t *sizes,
                                               unsigned char *kinds) {
  return;
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_DATA)(
    int device, const void *openmp_target, size_t mapnum, void **hostaddrs,
    size_t *sizes, unsigned char *kinds) {
  return;
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_END_DATA)(void) { return; }

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_UPDATE)(
    int device, const void *openmp_target, size_t mapnum, void **hostaddrs,
    size_t *sizes, unsigned char *kinds) {
  return;
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TEAMS)(unsigned int num_teams,
                                              unsigned int thread_limit) {
  return;
}

// Task duplication function which copies src to dest (both are
// preallocated task structures)
static void __kmp_gomp_task_dup(kmp_task_t *dest, kmp_task_t *src,
                                kmp_int32 last_private) {
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(src);
  if (taskdata->td_copy_func) {
    (taskdata->td_copy_func)(dest->shareds, src->shareds);
  }
}

#ifdef __cplusplus
} // extern "C"
#endif

template <typename T>
void __GOMP_taskloop(void (*func)(void *), void *data,
                     void (*copy_func)(void *, void *), long arg_size,
                     long arg_align, unsigned gomp_flags,
                     unsigned long num_tasks, int priority, T start, T end,
                     T step) {
  typedef void (*p_task_dup_t)(kmp_task_t *, kmp_task_t *, kmp_int32);
  MKLOC(loc, "GOMP_taskloop");
  int sched;
  T *loop_bounds;
  int gtid = __kmp_entry_gtid();
  kmp_int32 flags = 0;
  int if_val = gomp_flags & (1u << 10);
  int nogroup = gomp_flags & (1u << 11);
  int up = gomp_flags & (1u << 8);
  p_task_dup_t task_dup = NULL;
  kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags;
#ifdef KMP_DEBUG
  {
    char *buff;
    buff = __kmp_str_format(
        "GOMP_taskloop: T#%%d: func:%%p data:%%p copy_func:%%p "
        "arg_size:%%ld arg_align:%%ld gomp_flags:0x%%x num_tasks:%%lu "
        "priority:%%d start:%%%s end:%%%s step:%%%s\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<T>::spec);
    KA_TRACE(20, (buff, gtid, func, data, copy_func, arg_size, arg_align,
                  gomp_flags, num_tasks, priority, start, end, step));
    __kmp_str_free(&buff);
  }
#endif
  KMP_ASSERT((size_t)arg_size >= 2 * sizeof(T));
  KMP_ASSERT(arg_align > 0);
  // The low-order bit is the "untied" flag
  if (!(gomp_flags & 1)) {
    input_flags->tiedness = 1;
  }
  // The second low-order bit is the "final" flag
  if (gomp_flags & 2) {
    input_flags->final = 1;
  }
  // Negative step flag
  if (!up) {
    // If the step is flagged as negative but was not properly sign-extended,
    // sign-extend it manually. It could be a short, int, or char embedded in
    // a long, so no single cast can be assumed.
    if (step > 0) {
      for (int i = sizeof(T) * CHAR_BIT - 1; i >= 0L; --i) {
        // break at the first 1 bit
        if (step & ((T)1 << i))
          break;
        step |= ((T)1 << i);
      }
    }
  }
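  // Example: a 32-bit step of -1 embedded in a 64-bit T arrives here as
  // 0x00000000FFFFFFFF, which compares greater than zero. The loop above
  // sets bits 63..32 and stops at bit 31 (the first 1 bit), leaving
  // 0xFFFFFFFFFFFFFFFF, i.e. a properly sign-extended -1.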
  input_flags->native = 1;
  // Figure out if none/grainsize/num_tasks clause specified
  if (num_tasks > 0) {
    if (gomp_flags & (1u << 9))
      sched = 1; // grainsize specified
    else
      sched = 2; // num_tasks specified
  } else {
    // neither grainsize nor num_tasks specified
    sched = 0;
  }

  // __kmp_task_alloc() sets up all other flags
  kmp_task_t *task =
      __kmp_task_alloc(&loc, gtid, input_flags, sizeof(kmp_task_t),
                       arg_size + arg_align - 1, (kmp_routine_entry_t)func);
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  taskdata->td_copy_func = copy_func;
  taskdata->td_size_loop_bounds = sizeof(T);

  // re-align shareds if needed and set up the firstprivate copy constructors
  // through the task_dup mechanism
  task->shareds = (void *)((((size_t)task->shareds) + arg_align - 1) /
                           arg_align * arg_align);
  if (copy_func) {
    task_dup = __kmp_gomp_task_dup;
  }
  KMP_MEMCPY(task->shareds, data, arg_size);
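
  // GOMP supplies an exclusive end bound, while __kmpc_taskloop expects an
  // inclusive upper bound, so the stored bound is moved one step toward
  // start: -1 when iterating up, +1 when iterating down.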
  loop_bounds = (T *)task->shareds;
  loop_bounds[0] = start;
  loop_bounds[1] = end + (up ? -1 : 1);
  __kmpc_taskloop(&loc, gtid, task, if_val, (kmp_uint64 *)&(loop_bounds[0]),
                  (kmp_uint64 *)&(loop_bounds[1]), (kmp_int64)step, nogroup,
                  sched, (kmp_uint64)num_tasks, (void *)task_dup);
}
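
// For illustration (hypothetical names, a sketch of GCC's lowering): with
//   #pragma omp taskloop grainsize(4)
//   for (long i = lb; i < ub; i++) body(i);
// the compiler outlines the body and emits a call shaped like
//   GOMP_taskloop(outlined_fn, &captured, /*copy_func=*/NULL, size, align,
//                 flags /* bits 8 and 9 set */, 4, 0, lb, ub, 1);
// which reaches this template through the GOMP_taskloop wrapper below.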

// 4-byte version of GOMP_doacross_post
// This version needs to create a temporary array which converts 4-byte
// integers into 8-byte integers
template <typename T, bool need_conversion = (sizeof(long) == 4)>
void __kmp_GOMP_doacross_post(T *count);
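
// The need_conversion default selects the specialization at compile time:
// where long is 4 bytes (e.g. 32-bit ABIs or LLP64 Windows) the converting
// version below is instantiated; on LP64 targets the pass-through version
// is used.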

template <> void __kmp_GOMP_doacross_post<long, true>(long *count) {
  int gtid = __kmp_entry_gtid();
  kmp_info_t *th = __kmp_threads[gtid];
  MKLOC(loc, "GOMP_doacross_post");
  kmp_int64 num_dims = th->th.th_dispatch->th_doacross_info[0];
  kmp_int64 *vec =
      (kmp_int64 *)__kmp_thread_malloc(th, sizeof(kmp_int64) * num_dims);
  for (kmp_int64 i = 0; i < num_dims; ++i) {
    vec[i] = (kmp_int64)count[i];
  }
  __kmpc_doacross_post(&loc, gtid, vec);
  __kmp_thread_free(th, vec);
}

// 8-byte version of GOMP_doacross_post
// This version can just pass in the count array directly instead of creating
// a temporary array
template <> void __kmp_GOMP_doacross_post<long, false>(long *count) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_doacross_post");
  __kmpc_doacross_post(&loc, gtid, RCAST(kmp_int64 *, count));
}

template <typename T> void __kmp_GOMP_doacross_wait(T first, va_list args) {
  int gtid = __kmp_entry_gtid();
  kmp_info_t *th = __kmp_threads[gtid];
  MKLOC(loc, "GOMP_doacross_wait");
  kmp_int64 num_dims = th->th.th_dispatch->th_doacross_info[0];
  kmp_int64 *vec =
      (kmp_int64 *)__kmp_thread_malloc(th, sizeof(kmp_int64) * num_dims);
  vec[0] = (kmp_int64)first;
  for (kmp_int64 i = 1; i < num_dims; ++i) {
    T item = va_arg(args, T);
    vec[i] = (kmp_int64)item;
  }
  __kmpc_doacross_wait(&loc, gtid, vec);
  __kmp_thread_free(th, vec);
  return;
}
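
// For illustration (a sketch, not exact codegen): for a doacross loop nest
//   #pragma omp for ordered(2)
//   for (i ...) for (j ...) {
//     #pragma omp ordered depend(sink : i - 1, j)
//     ...
//     #pragma omp ordered depend(source)
//   }
// GCC emits GOMP_doacross_wait(i - 1, j) for the sink clause and
// GOMP_doacross_post on the current iteration vector for the source clause.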

#ifdef __cplusplus
extern "C" {
#endif // __cplusplus

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKLOOP)(
    void (*func)(void *), void *data, void (*copy_func)(void *, void *),
    long arg_size, long arg_align, unsigned gomp_flags, unsigned long num_tasks,
    int priority, long start, long end, long step) {
  __GOMP_taskloop<long>(func, data, copy_func, arg_size, arg_align, gomp_flags,
                        num_tasks, priority, start, end, step);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKLOOP_ULL)(
    void (*func)(void *), void *data, void (*copy_func)(void *, void *),
    long arg_size, long arg_align, unsigned gomp_flags, unsigned long num_tasks,
    int priority, unsigned long long start, unsigned long long end,
    unsigned long long step) {
  __GOMP_taskloop<unsigned long long>(func, data, copy_func, arg_size,
                                      arg_align, gomp_flags, num_tasks,
                                      priority, start, end, step);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_POST)(long *count) {
  __kmp_GOMP_doacross_post(count);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_WAIT)(long first, ...) {
  va_list args;
  va_start(args, first);
  __kmp_GOMP_doacross_wait<long>(first, args);
  va_end(args);
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_ULL_POST)(
    unsigned long long *count) {
  int gtid = __kmp_entry_gtid();
  MKLOC(loc, "GOMP_doacross_ull_post");
  __kmpc_doacross_post(&loc, gtid, RCAST(kmp_int64 *, count));
}

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_ULL_WAIT)(
    unsigned long long first, ...) {
  va_list args;
  va_start(args, first);
  __kmp_GOMP_doacross_wait<unsigned long long>(first, args);
  va_end(args);
}

// fn: the function each master thread of a new team will call
// data: argument to fn
// num_teams, thread_limit: max bounds on the respective ICVs
// flags: unused
void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TEAMS_REG)(void (*fn)(void *),
                                                  void *data,
                                                  unsigned num_teams,
                                                  unsigned thread_limit,
                                                  unsigned flags) {
  MKLOC(loc, "GOMP_teams_reg");
  int gtid = __kmp_entry_gtid();
  KA_TRACE(20, ("GOMP_teams_reg: T#%d num_teams=%u thread_limit=%u flags=%u\n",
                gtid, num_teams, thread_limit, flags));
  __kmpc_push_num_teams(&loc, gtid, num_teams, thread_limit);
  __kmpc_fork_teams(&loc, 2, (microtask_t)__kmp_GOMP_microtask_wrapper, fn,
                    data);
  KA_TRACE(20, ("GOMP_teams_reg exit: T#%d\n", gtid));
}
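
// For illustration (a sketch): a host teams construct such as
//   #pragma omp teams num_teams(4) thread_limit(8)
// is lowered by GCC (GOMP_5.0 and later) to roughly
//   GOMP_teams_reg(outlined_fn, &captured, 4, 8, 0);
// with the flags argument currently unused here.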

void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKWAIT_DEPEND)(void **depend) {
  MKLOC(loc, "GOMP_taskwait_depend");
  int gtid = __kmp_entry_gtid();
  KA_TRACE(20, ("GOMP_taskwait_depend: T#%d\n", gtid));
  kmp_gomp_depends_info_t gomp_depends(depend);
  kmp_int32 ndeps = gomp_depends.get_num_deps();
  kmp_depend_info_t dep_list[ndeps];
  for (kmp_int32 i = 0; i < ndeps; i++)
    dep_list[i] = gomp_depends.get_kmp_depend(i);
#if OMPT_SUPPORT
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
  __kmpc_omp_wait_deps(&loc, gtid, ndeps, dep_list, 0, NULL);
  KA_TRACE(20, ("GOMP_taskwait_depend exit: T#%d\n", gtid));
}
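
// For illustration (a sketch of the short-form GOMP depend vector):
//   #pragma omp taskwait depend(in : x)
// arrives here as roughly
//   void *depend[] = {(void *)1 /* ndeps */, (void *)0 /* nout */, &x};
//   GOMP_taskwait_depend(depend);
// which kmp_gomp_depends_info_t converts into the kmp_depend_info_t list
// passed to __kmpc_omp_wait_deps.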

/* The following sections of code create aliases for the GOMP_* functions, then
   create versioned symbols using the assembler directive .symver. This is only
   pertinent for ELF .so libraries. The KMP_VERSION_SYMBOL macro is defined in
   kmp_os.h. */
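
/* Illustrative expansion (a sketch; see kmp_os.h for the actual macro):
   KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_BARRIER, 10, "GOMP_1.0") emits roughly
     __typeof__(__kmp_api_GOMP_barrier) __kmp_api_GOMP_barrier_10_alias
         __attribute__((alias("__kmp_api_GOMP_barrier")));
     __asm__(".symver __kmp_api_GOMP_barrier_10_alias,GOMP_barrier@GOMP_1.0");
   so each GOMP entry point is exported at the symbol version that binaries
   linked against libgomp expect. */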

#ifdef KMP_USE_VERSION_SYMBOLS
// GOMP_1.0 versioned symbols
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ATOMIC_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ATOMIC_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_BARRIER, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10,
                   "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10,
                   "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ORDERED_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ORDERED_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10,
                   "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10,
                   "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10,
                   "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10,
                   "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10, "GOMP_1.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_START, 10, "GOMP_1.0");

// GOMP_2.0 versioned symbols
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASK, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKWAIT, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20,
                   "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20, "GOMP_2.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20, "GOMP_2.0");

// GOMP_3.0 versioned symbols
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKYIELD, 30, "GOMP_3.0");

// GOMP_4.0 versioned symbols
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_SECTIONS, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKGROUP_START, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKGROUP_END, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_BARRIER_CANCEL, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CANCEL, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CANCELLATION_POINT, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END_CANCEL, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_DATA, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_END_DATA, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_UPDATE, 40, "GOMP_4.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TEAMS, 40, "GOMP_4.0");

// GOMP_4.5 versioned symbols
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKLOOP, 45, "GOMP_4.5");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKLOOP_ULL, 45, "GOMP_4.5");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_POST, 45, "GOMP_4.5");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_WAIT, 45, "GOMP_4.5");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_STATIC_START, 45,
                   "GOMP_4.5");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_DYNAMIC_START, 45,
                   "GOMP_4.5");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_GUIDED_START, 45,
                   "GOMP_4.5");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_RUNTIME_START, 45,
                   "GOMP_4.5");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_ULL_POST, 45, "GOMP_4.5");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_ULL_WAIT, 45, "GOMP_4.5");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_STATIC_START, 45,
                   "GOMP_4.5");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_DYNAMIC_START, 45,
                   "GOMP_4.5");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_GUIDED_START, 45,
                   "GOMP_4.5");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_RUNTIME_START, 45,
                   "GOMP_4.5");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_START, 45,
                   "GOMP_4.5");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_NEXT, 45,
                   "GOMP_4.5");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_START, 45,
                   "GOMP_4.5");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_NEXT, 45,
                   "GOMP_4.5");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_START, 45,
                   "GOMP_4.5");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_NEXT, 45,
                   "GOMP_4.5");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_START, 45,
                   "GOMP_4.5");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_NEXT, 45,
                   "GOMP_4.5");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_DYNAMIC, 45,
                   "GOMP_4.5");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_GUIDED, 45,
                   "GOMP_4.5");

// GOMP_5.0 versioned symbols
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_NEXT, 50,
                   "GOMP_5.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_START, 50,
                   "GOMP_5.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_RUNTIME_NEXT, 50,
                   "GOMP_5.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_RUNTIME_START, 50,
                   "GOMP_5.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_NEXT,
                   50, "GOMP_5.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_START,
                   50, "GOMP_5.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_NEXT, 50,
                   "GOMP_5.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_START, 50,
                   "GOMP_5.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_RUNTIME, 50,
                   "GOMP_5.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_MAYBE_NONMONOTONIC_RUNTIME,
                   50, "GOMP_5.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TEAMS_REG, 50, "GOMP_5.0");
KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKWAIT_DEPEND, 50, "GOMP_5.0");

#endif // KMP_USE_VERSION_SYMBOLS

#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus