2 * omp-icv.cpp -- OMPD Internal Control Variable handling
5 //===----------------------------------------------------------------------===//
7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8 // See https://llvm.org/LICENSE.txt for license information.
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
11 //===----------------------------------------------------------------------===//
/* clang-format expect kmp.h before omp.h which results in build break
 * due to a few redeclarations.
 */
#include "omp-debug.h"
// NOLINTNEXTLINE "to avoid clang tidy warning for the same reason as above."
#include "omp.h"
#include "ompd-private.h"
#include "TargetValue.h"
#include <cstdio>
#include <cstring>
24 /* The ICVs ompd-final-var and ompd-implicit-var below are for backward
25 * compatibility with 5.0.
// X-macro table of every ICV exposed through OMPD:
//   macro(enum-suffix, spec name, OMPD scope, unused)
// The ompd-* duplicates keep backward compatibility with OpenMP 5.0 tools.
#define FOREACH_OMPD_ICV(macro)                                                \
  macro(dyn_var, "dyn-var", ompd_scope_thread, 0)                              \
  macro(run_sched_var, "run-sched-var", ompd_scope_task, 0)                    \
  macro(stacksize_var, "stacksize-var", ompd_scope_address_space, 0)           \
  macro(cancel_var, "cancel-var", ompd_scope_address_space, 0)                 \
  macro(max_task_priority_var, "max-task-priority-var", ompd_scope_address_space, 0)\
  macro(debug_var, "debug-var", ompd_scope_address_space, 0)                   \
  macro(nthreads_var, "nthreads-var", ompd_scope_thread, 0)                    \
  macro(display_affinity_var, "display-affinity-var", ompd_scope_address_space, 0) \
  macro(affinity_format_var, "affinity-format-var", ompd_scope_address_space, 0) \
  macro(default_device_var, "default-device-var", ompd_scope_thread, 0)        \
  macro(tool_var, "tool-var", ompd_scope_address_space, 0)                     \
  macro(tool_libraries_var, "tool-libraries-var", ompd_scope_address_space, 0) \
  macro(tool_verbose_init_var, "tool-verbose-init-var", ompd_scope_address_space, 0)\
  macro(levels_var, "levels-var", ompd_scope_parallel, 1)                      \
  macro(active_levels_var, "active-levels-var", ompd_scope_parallel, 0)        \
  macro(thread_limit_var, "thread-limit-var", ompd_scope_task, 0)              \
  macro(max_active_levels_var, "max-active-levels-var", ompd_scope_task, 0)    \
  macro(bind_var, "bind-var", ompd_scope_task, 0)                              \
  macro(num_procs_var, "num-procs-var", ompd_scope_address_space, 0)           \
  macro(ompd_num_procs_var, "ompd-num-procs-var", ompd_scope_address_space, 0) \
  macro(thread_num_var, "thread-num-var", ompd_scope_thread, 1)                \
  macro(ompd_thread_num_var, "ompd-thread-num-var", ompd_scope_thread, 1)      \
  macro(final_var, "final-task-var", ompd_scope_task, 0)                       \
  macro(ompd_final_var, "ompd-final-var", ompd_scope_task, 0)                  \
  macro(ompd_final_task_var, "ompd-final-task-var", ompd_scope_task, 0)        \
  macro(implicit_var, "implicit-task-var", ompd_scope_task, 0)                 \
  macro(ompd_implicit_var, "ompd-implicit-var", ompd_scope_task, 0)            \
  macro(ompd_implicit_task_var, "ompd-implicit-task-var", ompd_scope_task, 0)  \
  macro(team_size_var, "team-size-var", ompd_scope_parallel, 1)                \
  macro(ompd_team_size_var, "ompd-team-size-var", ompd_scope_parallel, 1)
60 void __ompd_init_icvs(const ompd_callbacks_t
*table
) { callbacks
= table
; }
63 ompd_icv_undefined_marker
=
64 0, // ompd_icv_undefined is already defined in ompd.h
65 #define ompd_icv_macro(v, n, s, d) ompd_icv_##v,
66 FOREACH_OMPD_ICV(ompd_icv_macro
)
68 ompd_icv_after_last_icv
71 static const char *ompd_icv_string_values
[] = {"undefined",
72 #define ompd_icv_macro(v, n, s, d) n,
73 FOREACH_OMPD_ICV(ompd_icv_macro
)
77 static const ompd_scope_t ompd_icv_scope_values
[] = {
78 ompd_scope_global
, // undefined marker
79 #define ompd_icv_macro(v, n, s, d) s,
80 FOREACH_OMPD_ICV(ompd_icv_macro
)
85 ompd_rc_t
ompd_enumerate_icvs(ompd_address_space_handle_t
*handle
,
86 ompd_icv_id_t current
, ompd_icv_id_t
*next_id
,
87 const char **next_icv_name
,
88 ompd_scope_t
*next_scope
, int *more
) {
90 return ompd_rc_stale_handle
;
92 if (!next_id
|| !next_icv_name
|| !next_scope
|| !more
) {
93 return ompd_rc_bad_input
;
95 if (current
+ 1 >= ompd_icv_after_last_icv
) {
96 return ompd_rc_bad_input
;
99 *next_id
= current
+ 1;
101 char *icv_name
= NULL
;
102 ompd_rc_t ret
= callbacks
->alloc_memory(
103 std::strlen(ompd_icv_string_values
[*next_id
]) + 1, (void **)&icv_name
);
104 *next_icv_name
= icv_name
;
105 if (ret
!= ompd_rc_ok
) {
108 std::strcpy(icv_name
, ompd_icv_string_values
[*next_id
]);
110 *next_scope
= ompd_icv_scope_values
[*next_id
];
112 if ((*next_id
) + 1 >= ompd_icv_after_last_icv
) {
121 static ompd_rc_t
create_empty_string(const char **empty_string_ptr
) {
126 return ompd_rc_callback_error
;
128 ret
= callbacks
->alloc_memory(1, (void **)&empty_str
);
129 if (ret
!= ompd_rc_ok
) {
133 *empty_string_ptr
= empty_str
;
137 static ompd_rc_t
ompd_get_dynamic(
138 ompd_thread_handle_t
*thread_handle
, /* IN: OpenMP thread handle */
139 ompd_word_t
*dyn_val
/* OUT: Dynamic adjustment of threads */
142 return ompd_rc_stale_handle
;
143 if (!thread_handle
->ah
)
144 return ompd_rc_stale_handle
;
145 ompd_address_space_context_t
*context
= thread_handle
->ah
->context
;
147 return ompd_rc_stale_handle
;
149 return ompd_rc_callback_error
;
154 TValue(context
, thread_handle
->th
) /*__kmp_threads[t]->th*/
155 .cast("kmp_base_info_t")
156 .access("th_current_task") /*__kmp_threads[t]->th.th_current_task*/
157 .cast("kmp_taskdata_t", 1)
158 .access("td_icvs") /*__kmp_threads[t]->th.th_current_task->td_icvs*/
159 .cast("kmp_internal_control_t", 0)
161 "dynamic") /*__kmp_threads[t]->th.th_current_task->td_icvs.dynamic*/
169 ompd_get_stacksize(ompd_address_space_handle_t
170 *addr_handle
, /* IN: handle for the address space */
171 ompd_word_t
*stacksize_val
/* OUT: per thread stack size */
173 ompd_address_space_context_t
*context
= addr_handle
->context
;
175 return ompd_rc_stale_handle
;
178 return ompd_rc_callback_error
;
182 ret
= TValue(context
, "__kmp_stksize")
183 .castBase("__kmp_stksize")
184 .getValue(stacksize
);
185 *stacksize_val
= stacksize
;
189 static ompd_rc_t
ompd_get_cancellation(
190 ompd_address_space_handle_t
191 *addr_handle
, /* IN: handle for the address space */
192 ompd_word_t
*cancellation_val
/* OUT: cancellation value */
194 ompd_address_space_context_t
*context
= addr_handle
->context
;
196 return ompd_rc_stale_handle
;
198 return ompd_rc_callback_error
;
202 int omp_cancellation
;
203 ret
= TValue(context
, "__kmp_omp_cancellation")
204 .castBase("__kmp_omp_cancellation")
205 .getValue(omp_cancellation
);
206 *cancellation_val
= omp_cancellation
;
210 static ompd_rc_t
ompd_get_max_task_priority(
211 ompd_address_space_handle_t
212 *addr_handle
, /* IN: handle for the address space */
213 ompd_word_t
*max_task_priority_val
/* OUT: max task priority value */
215 ompd_address_space_context_t
*context
= addr_handle
->context
;
217 return ompd_rc_stale_handle
;
219 return ompd_rc_callback_error
;
223 int max_task_priority
;
224 ret
= TValue(context
, "__kmp_max_task_priority")
225 .castBase("__kmp_max_task_priority")
226 .getValue(max_task_priority
);
227 *max_task_priority_val
= max_task_priority
;
232 ompd_get_debug(ompd_address_space_handle_t
233 *addr_handle
, /* IN: handle for the address space */
234 ompd_word_t
*debug_val
/* OUT: debug value */
236 ompd_address_space_context_t
*context
= addr_handle
->context
;
238 return ompd_rc_stale_handle
;
240 return ompd_rc_callback_error
;
244 uint64_t ompd_state_val
;
245 ret
= TValue(context
, "ompd_state")
246 .castBase("ompd_state")
247 .getValue(ompd_state_val
);
248 if (ompd_state_val
> 0) {
256 /* Helper routine for the ompd_get_nthreads routines */
257 static ompd_rc_t
ompd_get_nthreads_aux(ompd_thread_handle_t
*thread_handle
,
259 uint32_t *current_nesting_level
,
262 return ompd_rc_stale_handle
;
263 if (!thread_handle
->ah
)
264 return ompd_rc_stale_handle
;
265 ompd_address_space_context_t
*context
= thread_handle
->ah
->context
;
267 return ompd_rc_stale_handle
;
269 return ompd_rc_callback_error
;
272 ompd_rc_t ret
= TValue(context
, "__kmp_nested_nth")
273 .cast("kmp_nested_nthreads_t")
275 .castBase(ompd_type_int
)
277 if (ret
!= ompd_rc_ok
)
281 TValue(context
, thread_handle
->th
) /*__kmp_threads[t]->th*/
282 .cast("kmp_base_info_t")
283 .access("th_current_task") /*__kmp_threads[t]->th.th_current_task*/
284 .cast("kmp_taskdata_t", 1);
287 .access("td_team") /*__kmp_threads[t]->th.th_current_task.td_team*/
288 .cast("kmp_team_p", 1)
289 .access("t") /*__kmp_threads[t]->th.th_current_task.td_team->t*/
290 .cast("kmp_base_team_t", 0) /*t*/
291 .access("t_level") /*t.t_level*/
292 .castBase(ompd_type_int
)
293 .getValue(*current_nesting_level
);
294 if (ret
!= ompd_rc_ok
)
297 ret
= taskdata
.cast("kmp_taskdata_t", 1)
298 .access("td_icvs") /*__kmp_threads[t]->th.th_current_task->td_icvs*/
299 .cast("kmp_internal_control_t", 0)
301 "nproc") /*__kmp_threads[t]->th.th_current_task->td_icvs.nproc*/
302 .castBase(ompd_type_int
)
304 if (ret
!= ompd_rc_ok
)
310 static ompd_rc_t
ompd_get_nthreads(
311 ompd_thread_handle_t
*thread_handle
, /* IN: handle for the thread */
312 ompd_word_t
*nthreads_var_val
/* OUT: nthreads-var (of integer type)
317 uint32_t current_nesting_level
;
320 ret
= ompd_get_nthreads_aux(thread_handle
, &used
, ¤t_nesting_level
,
322 if (ret
!= ompd_rc_ok
)
325 /*__kmp_threads[t]->th.th_current_task->td_icvs.nproc*/
326 *nthreads_var_val
= nproc
;
327 /* If the nthreads-var is a list with more than one element, then the value of
328 this ICV cannot be represented by an integer type. In this case,
329 ompd_rc_incomplete is returned. The tool can check the return value and
330 can choose to invoke ompd_get_icv_string_from_scope() if needed. */
331 if (current_nesting_level
< used
- 1) {
332 return ompd_rc_incomplete
;
337 static ompd_rc_t
ompd_get_nthreads(
338 ompd_thread_handle_t
*thread_handle
, /* IN: handle for the thread */
339 const char **nthreads_list_string
/* OUT: string list of comma separated
344 uint32_t current_nesting_level
;
347 ret
= ompd_get_nthreads_aux(thread_handle
, &used
, ¤t_nesting_level
,
349 if (ret
!= ompd_rc_ok
)
352 uint32_t num_list_elems
;
353 if (used
== 0 || current_nesting_level
>= used
) {
356 num_list_elems
= used
- current_nesting_level
;
358 size_t buffer_size
= 16 /* digits per element including the comma separator */
360 1; /* string terminator NULL */
361 char *nthreads_list_str
;
362 ret
= callbacks
->alloc_memory(buffer_size
, (void **)&nthreads_list_str
);
363 if (ret
!= ompd_rc_ok
)
366 /* The nthreads-var list would be:
367 [__kmp_threads[t]->th.th_current_task->td_icvs.nproc,
368 __kmp_nested_nth.nth[current_nesting_level + 1],
369 __kmp_nested_nth.nth[current_nesting_level + 2],
371 __kmp_nested_nth.nth[used - 1]]*/
373 sprintf(nthreads_list_str
, "%d", nproc
);
374 *nthreads_list_string
= nthreads_list_str
;
375 if (num_list_elems
== 1) {
382 for (current_nesting_level
++; /* the list element for this nesting
383 * level has already been accounted for
385 current_nesting_level
< used
; current_nesting_level
++) {
387 ret
= TValue(thread_handle
->ah
->context
, "__kmp_nested_nth")
388 .cast("kmp_nested_nthreads_t")
391 .getArrayElement(current_nesting_level
)
392 .castBase(ompd_type_int
)
393 .getValue(nth_value
);
395 if (ret
!= ompd_rc_ok
)
398 sprintf(temp_value
, ",%d", nth_value
);
399 strcat(nthreads_list_str
, temp_value
);
405 static ompd_rc_t
ompd_get_display_affinity(
406 ompd_address_space_handle_t
407 *addr_handle
, /* IN: handle for the address space */
408 ompd_word_t
*display_affinity_val
/* OUT: display affinity value */
410 ompd_address_space_context_t
*context
= addr_handle
->context
;
412 return ompd_rc_stale_handle
;
416 return ompd_rc_callback_error
;
418 ret
= TValue(context
, "__kmp_display_affinity")
419 .castBase("__kmp_display_affinity")
420 .getValue(*display_affinity_val
);
424 static ompd_rc_t
ompd_get_affinity_format(
425 ompd_address_space_handle_t
*addr_handle
, /* IN: address space handle*/
426 const char **affinity_format_string
/* OUT: affinity format string */
428 ompd_address_space_context_t
*context
= addr_handle
->context
;
430 return ompd_rc_stale_handle
;
433 return ompd_rc_callback_error
;
436 ret
= TValue(context
, "__kmp_affinity_format")
438 .getString(affinity_format_string
);
442 static ompd_rc_t
ompd_get_tool_libraries(
443 ompd_address_space_handle_t
*addr_handle
, /* IN: address space handle*/
444 const char **tool_libraries_string
/* OUT: tool libraries string */
446 if (!tool_libraries_string
)
447 return ompd_rc_bad_input
;
449 ompd_address_space_context_t
*context
= addr_handle
->context
;
451 return ompd_rc_stale_handle
;
454 return ompd_rc_callback_error
;
457 ret
= TValue(context
, "__kmp_tool_libraries")
459 .getString(tool_libraries_string
);
460 if (ret
== ompd_rc_unsupported
) {
461 ret
= create_empty_string(tool_libraries_string
);
466 static ompd_rc_t
ompd_get_default_device(
467 ompd_thread_handle_t
*thread_handle
, /* IN: handle for the thread */
468 ompd_word_t
*default_device_val
/* OUT: default device value */
471 return ompd_rc_stale_handle
;
472 if (!thread_handle
->ah
)
473 return ompd_rc_stale_handle
;
474 ompd_address_space_context_t
*context
= thread_handle
->ah
->context
;
476 return ompd_rc_stale_handle
;
478 return ompd_rc_callback_error
;
481 TValue(context
, thread_handle
->th
) /*__kmp_threads[t]->th*/
482 .cast("kmp_base_info_t")
483 .access("th_current_task") /*__kmp_threads[t]->th.th_current_task*/
484 .cast("kmp_taskdata_t", 1)
485 .access("td_icvs") /*__kmp_threads[t]->th.th_current_task->td_icvs*/
486 .cast("kmp_internal_control_t", 0)
487 /*__kmp_threads[t]->th.th_current_task->td_icvs.default_device*/
488 .access("default_device")
490 .getValue(*default_device_val
);
495 ompd_get_tool(ompd_address_space_handle_t
496 *addr_handle
, /* IN: handle for the address space */
497 ompd_word_t
*tool_val
/* OUT: tool value */
499 ompd_address_space_context_t
*context
= addr_handle
->context
;
501 return ompd_rc_stale_handle
;
503 return ompd_rc_callback_error
;
508 TValue(context
, "__kmp_tool").castBase("__kmp_tool").getValue(*tool_val
);
512 static ompd_rc_t
ompd_get_tool_verbose_init(
513 ompd_address_space_handle_t
*addr_handle
, /* IN: address space handle*/
514 const char **tool_verbose_init_string
/* OUT: tool verbose init string */
516 ompd_address_space_context_t
*context
= addr_handle
->context
;
518 return ompd_rc_stale_handle
;
521 return ompd_rc_callback_error
;
524 ret
= TValue(context
, "__kmp_tool_verbose_init")
526 .getString(tool_verbose_init_string
);
527 if (ret
== ompd_rc_unsupported
) {
528 ret
= create_empty_string(tool_verbose_init_string
);
533 static ompd_rc_t
ompd_get_level(
534 ompd_parallel_handle_t
*parallel_handle
, /* IN: OpenMP parallel handle */
535 ompd_word_t
*val
/* OUT: nesting level */
537 if (!parallel_handle
->ah
)
538 return ompd_rc_stale_handle
;
539 ompd_address_space_context_t
*context
= parallel_handle
->ah
->context
;
541 return ompd_rc_stale_handle
;
544 return ompd_rc_callback_error
;
549 ompd_rc_t ret
= TValue(context
, parallel_handle
->th
)
550 .cast("kmp_base_team_t", 0) /*t*/
551 .access("t_level") /*t.t_level*/
558 static ompd_rc_t
ompd_get_active_level(
559 ompd_parallel_handle_t
*parallel_handle
, /* IN: OpenMP parallel handle */
560 ompd_word_t
*val
/* OUT: active nesting level */
562 if (!parallel_handle
->ah
)
563 return ompd_rc_stale_handle
;
564 ompd_address_space_context_t
*context
= parallel_handle
->ah
->context
;
566 return ompd_rc_stale_handle
;
568 return ompd_rc_callback_error
;
573 ompd_rc_t ret
= TValue(context
, parallel_handle
->th
)
574 .cast("kmp_base_team_t", 0) /*t*/
575 .access("t_active_level") /*t.t_active_level*/
583 ompd_get_num_procs(ompd_address_space_handle_t
584 *addr_handle
, /* IN: handle for the address space */
585 ompd_word_t
*val
/* OUT: number of processes */
587 ompd_address_space_context_t
*context
= addr_handle
->context
;
589 return ompd_rc_stale_handle
;
591 return ompd_rc_callback_error
;
595 return ompd_rc_bad_input
;
599 ret
= TValue(context
, "__kmp_avail_proc")
600 .castBase("__kmp_avail_proc")
606 static ompd_rc_t
ompd_get_thread_limit(
607 ompd_task_handle_t
*task_handle
, /* IN: OpenMP task handle*/
608 ompd_word_t
*val
/* OUT: max number of threads */
610 if (!task_handle
->ah
)
611 return ompd_rc_stale_handle
;
612 ompd_address_space_context_t
*context
= task_handle
->ah
->context
;
614 return ompd_rc_stale_handle
;
616 return ompd_rc_callback_error
;
619 ompd_rc_t ret
= TValue(context
, task_handle
->th
)
620 .cast("kmp_taskdata_t") // td
621 .access("td_icvs") // td->td_icvs
622 .cast("kmp_internal_control_t", 0)
623 .access("thread_limit") // td->td_icvs.thread_limit
630 static ompd_rc_t
ompd_get_thread_num(
631 ompd_thread_handle_t
*thread_handle
, /* IN: OpenMP thread handle*/
632 ompd_word_t
*val
/* OUT: number of the thread within the team */
635 return ompd_rc_stale_handle
;
636 if (!thread_handle
->ah
)
637 return ompd_rc_stale_handle
;
638 ompd_address_space_context_t
*context
= thread_handle
->ah
->context
;
640 return ompd_rc_stale_handle
;
642 return ompd_rc_callback_error
;
646 TValue(context
, thread_handle
->th
) /*__kmp_threads[t]->th*/
647 .cast("kmp_base_info_t")
648 .access("th_info") /*__kmp_threads[t]->th.th_info*/
650 .access("ds") /*__kmp_threads[t]->th.th_info.ds*/
651 .cast("kmp_desc_base_t")
652 .access("ds_tid") /*__kmp_threads[t]->th.th_info.ds.ds_tid*/
659 ompd_in_final(ompd_task_handle_t
*task_handle
, /* IN: OpenMP task handle*/
660 ompd_word_t
*val
/* OUT: max number of threads */
662 if (!task_handle
->ah
)
663 return ompd_rc_stale_handle
;
664 ompd_address_space_context_t
*context
= task_handle
->ah
->context
;
666 return ompd_rc_stale_handle
;
668 return ompd_rc_callback_error
;
671 ompd_rc_t ret
= TValue(context
, task_handle
->th
)
672 .cast("kmp_taskdata_t") // td
673 .access("td_flags") // td->td_flags
674 .cast("kmp_tasking_flags_t")
675 .check("final", val
); // td->td_flags.tasktype
680 static ompd_rc_t
ompd_get_max_active_levels(
681 ompd_task_handle_t
*task_handle
, /* IN: OpenMP task handle*/
682 ompd_word_t
*val
/* OUT: max number of threads */
684 if (!task_handle
->ah
)
685 return ompd_rc_stale_handle
;
686 ompd_address_space_context_t
*context
= task_handle
->ah
->context
;
688 return ompd_rc_stale_handle
;
690 return ompd_rc_callback_error
;
694 TValue(context
, task_handle
->th
)
695 .cast("kmp_taskdata_t") // td
696 .access("td_icvs") // td->td_icvs
697 .cast("kmp_internal_control_t", 0)
698 .access("max_active_levels") // td->td_icvs.max_active_levels
705 static ompd_rc_t
ompd_get_run_schedule(
706 ompd_task_handle_t
*task_handle
, /* IN: OpenMP task handle*/
707 const char **run_sched_string
/* OUT: Run Schedule String
708 consisting of kind and modifier */
710 if (!task_handle
->ah
)
711 return ompd_rc_stale_handle
;
712 ompd_address_space_context_t
*context
= task_handle
->ah
->context
;
714 return ompd_rc_stale_handle
;
716 return ompd_rc_callback_error
;
721 TValue sched
= TValue(context
, task_handle
->th
)
722 .cast("kmp_taskdata_t") // td
723 .access("td_icvs") // td->td_icvs
724 .cast("kmp_internal_control_t", 0)
725 .access("sched") // td->td_icvs.sched
726 .cast("kmp_r_sched_t", 0);
728 ompd_rc_t ret
= sched
729 .access("r_sched_type") // td->td_icvs.sched.r_sched_type
732 if (ret
!= ompd_rc_ok
) {
737 .access("chunk") // td->td_icvs.sched.chunk
740 if (ret
!= ompd_rc_ok
) {
743 char *run_sched_var_string
;
744 ret
= callbacks
->alloc_memory(100, (void **)&run_sched_var_string
);
745 if (ret
!= ompd_rc_ok
) {
748 run_sched_var_string
[0] = '\0';
749 if (SCHEDULE_HAS_MONOTONIC(kind
)) {
750 strcpy(run_sched_var_string
, "monotonic:");
751 } else if (SCHEDULE_HAS_NONMONOTONIC(kind
)) {
752 strcpy(run_sched_var_string
, "nonmonotonic:");
755 bool static_unchunked
= false;
756 switch (SCHEDULE_WITHOUT_MODIFIERS(kind
)) {
758 case kmp_sch_static_greedy
:
759 case kmp_sch_static_balanced
:
760 static_unchunked
= true;
761 strcat(run_sched_var_string
, "static");
763 case kmp_sch_static_chunked
:
764 strcat(run_sched_var_string
, "static");
766 case kmp_sch_dynamic_chunked
:
767 strcat(run_sched_var_string
, "dynamic");
769 case kmp_sch_guided_chunked
:
770 case kmp_sch_guided_iterative_chunked
:
771 case kmp_sch_guided_analytical_chunked
:
772 strcat(run_sched_var_string
, "guided");
775 strcat(run_sched_var_string
, "auto");
777 case kmp_sch_trapezoidal
:
778 strcat(run_sched_var_string
, "trapezoidal");
780 case kmp_sch_static_steal
:
781 strcat(run_sched_var_string
, "static_steal");
784 ret
= callbacks
->free_memory((void *)(run_sched_var_string
));
785 if (ret
!= ompd_rc_ok
) {
788 ret
= create_empty_string(run_sched_string
);
792 if (static_unchunked
== true) {
793 // To be in sync with what OMPT returns.
794 // Chunk was not set. Shown with a zero value.
799 sprintf(temp_str
, ",%d", chunk
);
800 strcat(run_sched_var_string
, temp_str
);
801 *run_sched_string
= run_sched_var_string
;
805 /* Helper routine for the ompd_get_proc_bind routines */
806 static ompd_rc_t
ompd_get_proc_bind_aux(ompd_task_handle_t
*task_handle
,
808 uint32_t *current_nesting_level
,
809 uint32_t *proc_bind
) {
810 if (!task_handle
->ah
)
811 return ompd_rc_stale_handle
;
812 ompd_address_space_context_t
*context
= task_handle
->ah
->context
;
814 return ompd_rc_stale_handle
;
816 return ompd_rc_callback_error
;
819 ompd_rc_t ret
= TValue(context
, "__kmp_nested_proc_bind")
820 .cast("kmp_nested_proc_bind_t")
822 .castBase(ompd_type_int
)
824 if (ret
!= ompd_rc_ok
)
827 TValue taskdata
= TValue(context
, task_handle
->th
) /* td */
828 .cast("kmp_taskdata_t");
831 .access("td_team") /* td->td_team*/
832 .cast("kmp_team_p", 1)
833 .access("t") /* td->td_team->t*/
834 .cast("kmp_base_team_t", 0) /*t*/
835 .access("t_level") /*t.t_level*/
836 .castBase(ompd_type_int
)
837 .getValue(*current_nesting_level
);
838 if (ret
!= ompd_rc_ok
)
842 .access("td_icvs") /* td->td_icvs */
843 .cast("kmp_internal_control_t", 0)
844 .access("proc_bind") /* td->td_icvs.proc_bind */
846 .getValue(*proc_bind
);
851 ompd_get_proc_bind(ompd_task_handle_t
*task_handle
, /* IN: OpenMP task handle */
852 ompd_word_t
*bind
/* OUT: Kind of proc-binding */
856 uint32_t current_nesting_level
;
859 ret
= ompd_get_proc_bind_aux(task_handle
, &used
, ¤t_nesting_level
,
861 if (ret
!= ompd_rc_ok
)
865 /* If bind-var is a list with more than one element, then the value of
866 this ICV cannot be represented by an integer type. In this case,
867 ompd_rc_incomplete is returned. The tool can check the return value and
868 can choose to invoke ompd_get_icv_string_from_scope() if needed. */
869 if (current_nesting_level
< used
- 1) {
870 return ompd_rc_incomplete
;
875 static ompd_rc_t
ompd_get_proc_bind(
876 ompd_task_handle_t
*task_handle
, /* IN: OpenMP task handle */
877 const char **proc_bind_list_string
/* OUT: string list of comma separated
882 uint32_t current_nesting_level
;
885 ret
= ompd_get_proc_bind_aux(task_handle
, &used
, ¤t_nesting_level
,
887 if (ret
!= ompd_rc_ok
)
890 uint32_t num_list_elems
;
891 if (used
== 0 || current_nesting_level
>= used
) {
894 num_list_elems
= used
- current_nesting_level
;
896 size_t buffer_size
= 16 /* digits per element including the comma separator */
898 1; /* string terminator NULL */
899 char *proc_bind_list_str
;
900 ret
= callbacks
->alloc_memory(buffer_size
, (void **)&proc_bind_list_str
);
901 if (ret
!= ompd_rc_ok
)
904 /* The bind-var list would be:
905 [td->td_icvs.proc_bind,
906 __kmp_nested_proc_bind.bind_types[current_nesting_level + 1],
907 __kmp_nested_proc_bind.bind_types[current_nesting_level + 2],
909 __kmp_nested_proc_bind.bind_types[used - 1]]*/
911 sprintf(proc_bind_list_str
, "%d", proc_bind
);
912 *proc_bind_list_string
= proc_bind_list_str
;
913 if (num_list_elems
== 1) {
918 uint32_t bind_types_value
;
920 for (current_nesting_level
++; /* the list element for this nesting
921 level has already been accounted for
923 current_nesting_level
< used
; current_nesting_level
++) {
925 ret
= TValue(task_handle
->ah
->context
, "__kmp_nested_proc_bind")
926 .cast("kmp_nested_proc_bind_t")
927 .access("bind_types")
929 .getArrayElement(current_nesting_level
)
930 .castBase(ompd_type_int
)
931 .getValue(bind_types_value
);
933 if (ret
!= ompd_rc_ok
)
936 sprintf(temp_value
, ",%d", bind_types_value
);
937 strcat(proc_bind_list_str
, temp_value
);
944 ompd_is_implicit(ompd_task_handle_t
*task_handle
, /* IN: OpenMP task handle*/
945 ompd_word_t
*val
/* OUT: max number of threads */
948 return ompd_rc_stale_handle
;
949 if (!task_handle
->ah
)
950 return ompd_rc_stale_handle
;
951 ompd_address_space_context_t
*context
= task_handle
->ah
->context
;
953 return ompd_rc_stale_handle
;
955 return ompd_rc_callback_error
;
958 ompd_rc_t ret
= TValue(context
, task_handle
->th
)
959 .cast("kmp_taskdata_t") // td
960 .access("td_flags") // td->td_flags
961 .cast("kmp_tasking_flags_t")
962 .check("tasktype", val
); // td->td_flags.tasktype
963 *val
^= 1; // tasktype: explicit = 1, implicit = 0 => invert the value
967 ompd_rc_t
ompd_get_num_threads(
968 ompd_parallel_handle_t
*parallel_handle
, /* IN: OpenMP parallel handle */
969 ompd_word_t
*val
/* OUT: number of threads */
971 if (!parallel_handle
->ah
)
972 return ompd_rc_stale_handle
;
973 ompd_address_space_context_t
*context
= parallel_handle
->ah
->context
;
975 return ompd_rc_stale_handle
;
977 return ompd_rc_callback_error
;
980 ompd_rc_t ret
= ompd_rc_ok
;
981 if (parallel_handle
->lwt
.address
!= 0) {
985 ret
= TValue(context
, parallel_handle
->th
)
986 .cast("kmp_base_team_t", 0) /*t*/
987 .access("t_nproc") /*t.t_nproc*/
995 ompd_rc_t
ompd_get_icv_from_scope(void *handle
, ompd_scope_t scope
,
996 ompd_icv_id_t icv_id
,
997 ompd_word_t
*icv_value
) {
999 return ompd_rc_stale_handle
;
1001 if (icv_id
>= ompd_icv_after_last_icv
|| icv_id
== 0) {
1002 return ompd_rc_bad_input
;
1004 if (scope
!= ompd_icv_scope_values
[icv_id
]) {
1005 return ompd_rc_bad_input
;
1008 ompd_device_t device_kind
;
1011 case ompd_scope_thread
:
1012 device_kind
= ((ompd_thread_handle_t
*)handle
)->ah
->kind
;
1014 case ompd_scope_parallel
:
1015 device_kind
= ((ompd_parallel_handle_t
*)handle
)->ah
->kind
;
1017 case ompd_scope_address_space
:
1018 device_kind
= ((ompd_address_space_handle_t
*)handle
)->kind
;
1020 case ompd_scope_task
:
1021 device_kind
= ((ompd_task_handle_t
*)handle
)->ah
->kind
;
1024 return ompd_rc_bad_input
;
1027 if (device_kind
== OMPD_DEVICE_KIND_HOST
) {
1029 case ompd_icv_dyn_var
:
1030 return ompd_get_dynamic((ompd_thread_handle_t
*)handle
, icv_value
);
1031 case ompd_icv_run_sched_var
:
1032 return ompd_rc_incompatible
;
1033 case ompd_icv_stacksize_var
:
1034 return ompd_get_stacksize((ompd_address_space_handle_t
*)handle
,
1036 case ompd_icv_cancel_var
:
1037 return ompd_get_cancellation((ompd_address_space_handle_t
*)handle
,
1039 case ompd_icv_max_task_priority_var
:
1040 return ompd_get_max_task_priority((ompd_address_space_handle_t
*)handle
,
1042 case ompd_icv_debug_var
:
1043 return ompd_get_debug((ompd_address_space_handle_t
*)handle
, icv_value
);
1044 case ompd_icv_nthreads_var
:
1045 return ompd_get_nthreads((ompd_thread_handle_t
*)handle
, icv_value
);
1046 case ompd_icv_display_affinity_var
:
1047 return ompd_get_display_affinity((ompd_address_space_handle_t
*)handle
,
1049 case ompd_icv_affinity_format_var
:
1050 return ompd_rc_incompatible
;
1051 case ompd_icv_tool_libraries_var
:
1052 return ompd_rc_incompatible
;
1053 case ompd_icv_default_device_var
:
1054 return ompd_get_default_device((ompd_thread_handle_t
*)handle
, icv_value
);
1055 case ompd_icv_tool_var
:
1056 return ompd_get_tool((ompd_address_space_handle_t
*)handle
, icv_value
);
1057 case ompd_icv_tool_verbose_init_var
:
1058 return ompd_rc_incompatible
;
1059 case ompd_icv_levels_var
:
1060 return ompd_get_level((ompd_parallel_handle_t
*)handle
, icv_value
);
1061 case ompd_icv_active_levels_var
:
1062 return ompd_get_active_level((ompd_parallel_handle_t
*)handle
, icv_value
);
1063 case ompd_icv_thread_limit_var
:
1064 return ompd_get_thread_limit((ompd_task_handle_t
*)handle
, icv_value
);
1065 case ompd_icv_max_active_levels_var
:
1066 return ompd_get_max_active_levels((ompd_task_handle_t
*)handle
,
1068 case ompd_icv_bind_var
:
1069 return ompd_get_proc_bind((ompd_task_handle_t
*)handle
, icv_value
);
1070 case ompd_icv_num_procs_var
:
1071 case ompd_icv_ompd_num_procs_var
:
1072 return ompd_get_num_procs((ompd_address_space_handle_t
*)handle
,
1074 case ompd_icv_thread_num_var
:
1075 case ompd_icv_ompd_thread_num_var
:
1076 return ompd_get_thread_num((ompd_thread_handle_t
*)handle
, icv_value
);
1077 case ompd_icv_final_var
:
1078 case ompd_icv_ompd_final_var
:
1079 case ompd_icv_ompd_final_task_var
:
1080 return ompd_in_final((ompd_task_handle_t
*)handle
, icv_value
);
1081 case ompd_icv_implicit_var
:
1082 case ompd_icv_ompd_implicit_var
:
1083 case ompd_icv_ompd_implicit_task_var
:
1084 return ompd_is_implicit((ompd_task_handle_t
*)handle
, icv_value
);
1085 case ompd_icv_team_size_var
:
1086 case ompd_icv_ompd_team_size_var
:
1087 return ompd_get_num_threads((ompd_parallel_handle_t
*)handle
, icv_value
);
1089 return ompd_rc_unsupported
;
1092 return ompd_rc_unsupported
;
1095 ompd_rc_t
ompd_get_icv_string_from_scope(void *handle
, ompd_scope_t scope
,
1096 ompd_icv_id_t icv_id
,
1097 const char **icv_string
) {
1099 return ompd_rc_stale_handle
;
1101 if (icv_id
>= ompd_icv_after_last_icv
|| icv_id
== 0) {
1102 return ompd_rc_bad_input
;
1104 if (scope
!= ompd_icv_scope_values
[icv_id
]) {
1105 return ompd_rc_bad_input
;
1108 ompd_device_t device_kind
;
1111 case ompd_scope_thread
:
1112 device_kind
= ((ompd_thread_handle_t
*)handle
)->ah
->kind
;
1114 case ompd_scope_parallel
:
1115 device_kind
= ((ompd_parallel_handle_t
*)handle
)->ah
->kind
;
1117 case ompd_scope_address_space
:
1118 device_kind
= ((ompd_address_space_handle_t
*)handle
)->kind
;
1120 case ompd_scope_task
:
1121 device_kind
= ((ompd_task_handle_t
*)handle
)->ah
->kind
;
1124 return ompd_rc_bad_input
;
1127 if (device_kind
== OMPD_DEVICE_KIND_HOST
) {
1129 case ompd_icv_run_sched_var
:
1130 return ompd_get_run_schedule((ompd_task_handle_t
*)handle
, icv_string
);
1131 case ompd_icv_nthreads_var
:
1132 return ompd_get_nthreads((ompd_thread_handle_t
*)handle
, icv_string
);
1133 case ompd_icv_bind_var
:
1134 return ompd_get_proc_bind((ompd_task_handle_t
*)handle
, icv_string
);
1135 case ompd_icv_affinity_format_var
:
1136 return ompd_get_affinity_format((ompd_address_space_handle_t
*)handle
,
1138 case ompd_icv_tool_libraries_var
:
1139 return ompd_get_tool_libraries((ompd_address_space_handle_t
*)handle
,
1141 case ompd_icv_tool_verbose_init_var
:
1142 return ompd_get_tool_verbose_init((ompd_address_space_handle_t
*)handle
,
1145 return ompd_rc_unsupported
;
1148 return ompd_rc_unsupported
;
1151 static ompd_rc_t
__ompd_get_tool_data(TValue
&dataValue
, ompd_word_t
*value
,
1152 ompd_address_t
*ptr
) {
1153 ompd_rc_t ret
= dataValue
.getError();
1154 if (ret
!= ompd_rc_ok
)
1156 ret
= dataValue
.access("value").castBase().getValue(*value
);
1157 if (ret
!= ompd_rc_ok
)
1159 ptr
->segment
= OMPD_SEGMENT_UNSPECIFIED
;
1160 ret
= dataValue
.access("ptr").castBase().getValue(ptr
->address
);
1164 ompd_rc_t
ompd_get_task_data(ompd_task_handle_t
*task_handle
,
1165 ompd_word_t
*value
, ompd_address_t
*ptr
) {
1166 ompd_address_space_context_t
*context
= task_handle
->ah
->context
;
1168 return ompd_rc_stale_handle
;
1170 return ompd_rc_callback_error
;
1174 if (task_handle
->lwt
.address
) {
1175 dataValue
= TValue(context
, task_handle
->lwt
)
1176 .cast("ompt_lw_taskteam_t") /*lwt*/
1177 .access("ompt_task_info") // lwt->ompt_task_info
1178 .cast("ompt_task_info_t")
1179 .access("task_data") // lwt->ompd_task_info.task_data
1180 .cast("ompt_data_t");
1182 dataValue
= TValue(context
, task_handle
->th
)
1183 .cast("kmp_taskdata_t") /*td*/
1184 .access("ompt_task_info") // td->ompt_task_info
1185 .cast("ompt_task_info_t")
1186 .access("task_data") // td->ompd_task_info.task_data
1187 .cast("ompt_data_t");
1189 return __ompd_get_tool_data(dataValue
, value
, ptr
);
1192 ompd_rc_t
ompd_get_parallel_data(ompd_parallel_handle_t
*parallel_handle
,
1193 ompd_word_t
*value
, ompd_address_t
*ptr
) {
1194 ompd_address_space_context_t
*context
= parallel_handle
->ah
->context
;
1196 return ompd_rc_stale_handle
;
1198 return ompd_rc_callback_error
;
1202 if (parallel_handle
->lwt
.address
) {
1204 TValue(context
, parallel_handle
->lwt
)
1205 .cast("ompt_lw_taskteam_t") /*lwt*/
1206 .access("ompt_team_info") // lwt->ompt_team_info
1207 .cast("ompt_team_info_t")
1208 .access("parallel_data") // lwt->ompt_team_info.parallel_data
1209 .cast("ompt_data_t");
1211 dataValue
= TValue(context
, parallel_handle
->th
)
1212 .cast("kmp_base_team_t") /*t*/
1213 .access("ompt_team_info") // t->ompt_team_info
1214 .cast("ompt_team_info_t")
1215 .access("parallel_data") // t->ompt_team_info.parallel_data
1216 .cast("ompt_data_t");
1218 return __ompd_get_tool_data(dataValue
, value
, ptr
);
1221 ompd_rc_t
ompd_get_thread_data(ompd_thread_handle_t
*thread_handle
,
1222 ompd_word_t
*value
, ompd_address_t
*ptr
) {
1223 ompd_address_space_context_t
*context
= thread_handle
->ah
->context
;
1225 return ompd_rc_stale_handle
;
1227 return ompd_rc_callback_error
;
1231 TValue(context
, thread_handle
->th
)
1232 .cast("kmp_base_info_t") /*th*/
1233 .access("ompt_thread_info") // th->ompt_thread_info
1234 .cast("ompt_thread_info_t")
1235 .access("thread_data") // th->ompt_thread_info.thread_data
1236 .cast("ompt_data_t");
1237 return __ompd_get_tool_data(dataValue
, value
, ptr
);
1240 ompd_rc_t
ompd_get_tool_data(void *handle
, ompd_scope_t scope
,
1241 ompd_word_t
*value
, ompd_address_t
*ptr
) {
1243 return ompd_rc_stale_handle
;
1246 ompd_device_t device_kind
;
1249 case ompd_scope_thread
:
1250 device_kind
= ((ompd_thread_handle_t
*)handle
)->ah
->kind
;
1252 case ompd_scope_parallel
:
1253 device_kind
= ((ompd_parallel_handle_t
*)handle
)->ah
->kind
;
1255 case ompd_scope_task
:
1256 device_kind
= ((ompd_task_handle_t
*)handle
)->ah
->kind
;
1259 return ompd_rc_bad_input
;
1262 if (device_kind
== OMPD_DEVICE_KIND_HOST
) {
1264 case ompd_scope_thread
:
1265 return ompd_get_thread_data((ompd_thread_handle_t
*)handle
, value
, ptr
);
1266 case ompd_scope_parallel
:
1267 return ompd_get_parallel_data((ompd_parallel_handle_t
*)handle
, value
,
1269 case ompd_scope_task
:
1270 return ompd_get_task_data((ompd_task_handle_t
*)handle
, value
, ptr
);
1272 return ompd_rc_unsupported
;
1275 return ompd_rc_unsupported
;