/*
 * omp-icv.cpp -- OMPD Internal Control Variable handling
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

/* clang-format expects kmp.h before omp.h, which results in a build break
 * due to a few redeclarations.
 */
#include "omp-debug.h"
// NOLINTNEXTLINE "to avoid clang tidy warning for the same reason as above."
#include "omp.h"
#include "ompd-private.h"
#include "TargetValue.h"
#define OMPD_SKIP_HWLOC 1
#include "kmp.h"
#undef OMPD_SKIP_HWLOC
#include <cstring>

/* The ICVs ompd-final-var and ompd-implicit-var below are for backward
 * compatibility with 5.0.
 */

#define FOREACH_OMPD_ICV(macro)                                                \
  macro(dyn_var, "dyn-var", ompd_scope_thread, 0)                              \
  macro(run_sched_var, "run-sched-var", ompd_scope_task, 0)                    \
  macro(stacksize_var, "stacksize-var", ompd_scope_address_space, 0)           \
  macro(cancel_var, "cancel-var", ompd_scope_address_space, 0)                 \
  macro(max_task_priority_var, "max-task-priority-var", ompd_scope_address_space, 0) \
  macro(debug_var, "debug-var", ompd_scope_address_space, 0)                   \
  macro(nthreads_var, "nthreads-var", ompd_scope_thread, 0)                    \
  macro(display_affinity_var, "display-affinity-var", ompd_scope_address_space, 0) \
  macro(affinity_format_var, "affinity-format-var", ompd_scope_address_space, 0) \
  macro(default_device_var, "default-device-var", ompd_scope_thread, 0)        \
  macro(tool_var, "tool-var", ompd_scope_address_space, 0)                     \
  macro(tool_libraries_var, "tool-libraries-var", ompd_scope_address_space, 0) \
  macro(tool_verbose_init_var, "tool-verbose-init-var", ompd_scope_address_space, 0) \
  macro(levels_var, "levels-var", ompd_scope_parallel, 1)                      \
  macro(active_levels_var, "active-levels-var", ompd_scope_parallel, 0)        \
  macro(thread_limit_var, "thread-limit-var", ompd_scope_task, 0)              \
  macro(max_active_levels_var, "max-active-levels-var", ompd_scope_task, 0)    \
  macro(bind_var, "bind-var", ompd_scope_task, 0)                              \
  macro(num_procs_var, "num-procs-var", ompd_scope_address_space, 0)           \
  macro(ompd_num_procs_var, "ompd-num-procs-var", ompd_scope_address_space, 0) \
  macro(thread_num_var, "thread-num-var", ompd_scope_thread, 1)                \
  macro(ompd_thread_num_var, "ompd-thread-num-var", ompd_scope_thread, 1)      \
  macro(final_var, "final-task-var", ompd_scope_task, 0)                       \
  macro(ompd_final_var, "ompd-final-var", ompd_scope_task, 0)                  \
  macro(ompd_final_task_var, "ompd-final-task-var", ompd_scope_task, 0)        \
  macro(implicit_var, "implicit-task-var", ompd_scope_task, 0)                 \
  macro(ompd_implicit_var, "ompd-implicit-var", ompd_scope_task, 0)            \
  macro(ompd_implicit_task_var, "ompd-implicit-task-var", ompd_scope_task, 0)  \
  macro(team_size_var, "team-size-var", ompd_scope_parallel, 1)                \
  macro(ompd_team_size_var, "ompd-team-size-var", ompd_scope_parallel, 1)

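// The FOREACH_OMPD_ICV X-macro above is expanded three times below: once to
// build enum ompd_icv, once for ompd_icv_string_values[], and once for
// ompd_icv_scope_values[]. As an illustrative sketch (not part of the build),
// the dyn_var entry contributes:
//
//   ompd_icv_dyn_var,    // to enum ompd_icv
//   "dyn-var",           // to ompd_icv_string_values[]
//   ompd_scope_thread,   // to ompd_icv_scope_values[]
//
// so the three definitions stay in sync by construction.
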
void __ompd_init_icvs(const ompd_callbacks_t *table) { callbacks = table; }

enum ompd_icv {
  ompd_icv_undefined_marker =
      0, // ompd_icv_undefined is already defined in ompd.h
#define ompd_icv_macro(v, n, s, d) ompd_icv_##v,
  FOREACH_OMPD_ICV(ompd_icv_macro)
#undef ompd_icv_macro
  ompd_icv_after_last_icv
};

static const char *ompd_icv_string_values[] = {"undefined",
#define ompd_icv_macro(v, n, s, d) n,
                                               FOREACH_OMPD_ICV(ompd_icv_macro)
#undef ompd_icv_macro
};

static const ompd_scope_t ompd_icv_scope_values[] = {
    ompd_scope_global, // undefined marker
#define ompd_icv_macro(v, n, s, d) s,
    FOREACH_OMPD_ICV(ompd_icv_macro)
#undef ompd_icv_macro
};

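// Because the enum and both tables are generated from the same list, the enum
// value doubles as the index into each table; e.g.
// ompd_icv_string_values[ompd_icv_levels_var] is "levels-var" and
// ompd_icv_scope_values[ompd_icv_levels_var] is ompd_scope_parallel.
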
ompd_rc_t ompd_enumerate_icvs(ompd_address_space_handle_t *handle,
                              ompd_icv_id_t current, ompd_icv_id_t *next_id,
                              const char **next_icv_name,
                              ompd_scope_t *next_scope, int *more) {
  if (!handle) {
    return ompd_rc_stale_handle;
  }
  if (!next_id || !next_icv_name || !next_scope || !more) {
    return ompd_rc_bad_input;
  }
  if (current + 1 >= ompd_icv_after_last_icv) {
    return ompd_rc_bad_input;
  }

  *next_id = current + 1;

  char *icv_name = NULL;
  ompd_rc_t ret = callbacks->alloc_memory(
      std::strlen(ompd_icv_string_values[*next_id]) + 1, (void **)&icv_name);
  *next_icv_name = icv_name;
  if (ret != ompd_rc_ok) {
    return ret;
  }
  std::strcpy(icv_name, ompd_icv_string_values[*next_id]);

  *next_scope = ompd_icv_scope_values[*next_id];

  if ((*next_id) + 1 >= ompd_icv_after_last_icv) {
    *more = 0;
  } else {
    *more = 1;
  }

  return ompd_rc_ok;
}

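/* Illustrative (hypothetical) tool-side enumeration loop built on the API
 * above; `handle` stands for an address space handle owned by the tool:
 *
 *   ompd_icv_id_t id = ompd_icv_undefined;
 *   const char *name;
 *   ompd_scope_t scope;
 *   int more = 1;
 *   while (more &&
 *          ompd_enumerate_icvs(handle, id, &id, &name, &scope, &more) ==
 *              ompd_rc_ok) {
 *     // `name` and `scope` now describe the ICV with id `id`.
 *   }
 */
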
static ompd_rc_t create_empty_string(const char **empty_string_ptr) {
  char *empty_str;
  ompd_rc_t ret;

  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ret = callbacks->alloc_memory(1, (void **)&empty_str);
  if (ret != ompd_rc_ok) {
    return ret;
  }
  empty_str[0] = '\0';
  *empty_string_ptr = empty_str;
  return ompd_rc_ok;
}

static ompd_rc_t ompd_get_dynamic(
    ompd_thread_handle_t *thread_handle, /* IN: OpenMP thread handle */
    ompd_word_t *dyn_val /* OUT: Dynamic adjustment of threads */
) {
  if (!thread_handle)
    return ompd_rc_stale_handle;
  if (!thread_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = thread_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  int8_t dynamic;
  ompd_rc_t ret =
      TValue(context, thread_handle->th) /*__kmp_threads[t]->th*/
          .cast("kmp_base_info_t")
          .access("th_current_task") /*__kmp_threads[t]->th.th_current_task*/
          .cast("kmp_taskdata_t", 1)
          .access("td_icvs") /*__kmp_threads[t]->th.th_current_task->td_icvs*/
          .cast("kmp_internal_control_t", 0)
          .access(
              "dynamic") /*__kmp_threads[t]->th.th_current_task->td_icvs.dynamic*/
          .castBase()
          .getValue(dynamic);
  *dyn_val = dynamic;
  return ret;
}

static ompd_rc_t
ompd_get_stacksize(ompd_address_space_handle_t
                       *addr_handle, /* IN: handle for the address space */
                   ompd_word_t *stacksize_val /* OUT: per thread stack size */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;
  ompd_rc_t ret;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  size_t stacksize;
  ret = TValue(context, "__kmp_stksize")
            .castBase("__kmp_stksize")
            .getValue(stacksize);
  *stacksize_val = stacksize;
  return ret;
}

static ompd_rc_t ompd_get_cancellation(
    ompd_address_space_handle_t
        *addr_handle, /* IN: handle for the address space */
    ompd_word_t *cancellation_val /* OUT: cancellation value */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ompd_rc_t ret;

  int omp_cancellation;
  ret = TValue(context, "__kmp_omp_cancellation")
            .castBase("__kmp_omp_cancellation")
            .getValue(omp_cancellation);
  *cancellation_val = omp_cancellation;
  return ret;
}

static ompd_rc_t ompd_get_max_task_priority(
    ompd_address_space_handle_t
        *addr_handle, /* IN: handle for the address space */
    ompd_word_t *max_task_priority_val /* OUT: max task priority value */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ompd_rc_t ret;

  int max_task_priority;
  ret = TValue(context, "__kmp_max_task_priority")
            .castBase("__kmp_max_task_priority")
            .getValue(max_task_priority);
  *max_task_priority_val = max_task_priority;
  return ret;
}

static ompd_rc_t
ompd_get_debug(ompd_address_space_handle_t
                   *addr_handle, /* IN: handle for the address space */
               ompd_word_t *debug_val /* OUT: debug value */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ompd_rc_t ret;

  uint64_t ompd_state_val;
  ret = TValue(context, "ompd_state")
            .castBase("ompd_state")
            .getValue(ompd_state_val);
  if (ompd_state_val > 0) {
    *debug_val = 1;
  } else {
    *debug_val = 0;
  }
  return ret;
}

/* Helper routine for the ompd_get_nthreads routines */
static ompd_rc_t ompd_get_nthreads_aux(ompd_thread_handle_t *thread_handle,
                                       uint32_t *used,
                                       uint32_t *current_nesting_level,
                                       uint32_t *nproc) {
  if (!thread_handle)
    return ompd_rc_stale_handle;
  if (!thread_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = thread_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret = TValue(context, "__kmp_nested_nth")
                      .cast("kmp_nested_nthreads_t")
                      .access("used")
                      .castBase(ompd_type_int)
                      .getValue(*used);
  if (ret != ompd_rc_ok)
    return ret;

  TValue taskdata =
      TValue(context, thread_handle->th) /*__kmp_threads[t]->th*/
          .cast("kmp_base_info_t")
          .access("th_current_task") /*__kmp_threads[t]->th.th_current_task*/
          .cast("kmp_taskdata_t", 1);

  ret = taskdata
            .access("td_team") /*__kmp_threads[t]->th.th_current_task.td_team*/
            .cast("kmp_team_p", 1)
            .access("t") /*__kmp_threads[t]->th.th_current_task.td_team->t*/
            .cast("kmp_base_team_t", 0) /*t*/
            .access("t_level")          /*t.t_level*/
            .castBase(ompd_type_int)
            .getValue(*current_nesting_level);
  if (ret != ompd_rc_ok)
    return ret;

  ret = taskdata.cast("kmp_taskdata_t", 1)
            .access("td_icvs") /*__kmp_threads[t]->th.th_current_task->td_icvs*/
            .cast("kmp_internal_control_t", 0)
            .access(
                "nproc") /*__kmp_threads[t]->th.th_current_task->td_icvs.nproc*/
            .castBase(ompd_type_int)
            .getValue(*nproc);
  if (ret != ompd_rc_ok)
    return ret;

  return ompd_rc_ok;
}

static ompd_rc_t ompd_get_nthreads(
    ompd_thread_handle_t *thread_handle, /* IN: handle for the thread */
    ompd_word_t *nthreads_var_val /* OUT: nthreads-var (of integer type)
                                     for the thread */
) {
  uint32_t used;
  uint32_t nproc;
  uint32_t current_nesting_level;

  ompd_rc_t ret;
  ret = ompd_get_nthreads_aux(thread_handle, &used, &current_nesting_level,
                              &nproc);
  if (ret != ompd_rc_ok)
    return ret;

  /*__kmp_threads[t]->th.th_current_task->td_icvs.nproc*/
  *nthreads_var_val = nproc;
  /* If the nthreads-var is a list with more than one element, then the value
     of this ICV cannot be represented by an integer type. In this case,
     ompd_rc_incomplete is returned. The tool can check the return value and
     can choose to invoke ompd_get_icv_string_from_scope() if needed. */
  if (current_nesting_level < used - 1) {
    return ompd_rc_incomplete;
  }
  return ompd_rc_ok;
}

static ompd_rc_t ompd_get_nthreads(
    ompd_thread_handle_t *thread_handle, /* IN: handle for the thread */
    const char **nthreads_list_string /* OUT: string list of comma separated
                                         nthreads values */
) {
  uint32_t used;
  uint32_t nproc;
  uint32_t current_nesting_level;

  ompd_rc_t ret;
  ret = ompd_get_nthreads_aux(thread_handle, &used, &current_nesting_level,
                              &nproc);
  if (ret != ompd_rc_ok)
    return ret;

  uint32_t num_list_elems;
  if (used == 0 || current_nesting_level >= used) {
    num_list_elems = 1;
  } else {
    num_list_elems = used - current_nesting_level;
  }
  size_t buffer_size =
      16 /* digits per element including the comma separator */
          * num_list_elems +
      1; /* string terminator NULL */
  char *nthreads_list_str;
  ret = callbacks->alloc_memory(buffer_size, (void **)&nthreads_list_str);
  if (ret != ompd_rc_ok)
    return ret;

  /* The nthreads-var list would be:
     [__kmp_threads[t]->th.th_current_task->td_icvs.nproc,
      __kmp_nested_nth.nth[current_nesting_level + 1],
      __kmp_nested_nth.nth[current_nesting_level + 2],
      ...,
      __kmp_nested_nth.nth[used - 1]]*/

  sprintf(nthreads_list_str, "%d", nproc);
  *nthreads_list_string = nthreads_list_str;
  if (num_list_elems == 1) {
    return ompd_rc_ok;
  }

  char temp_value[16];
  uint32_t nth_value;

  for (current_nesting_level++; /* the list element for this nesting
                                 * level has already been accounted for
                                 * by nproc */
       current_nesting_level < used; current_nesting_level++) {

    ret = TValue(thread_handle->ah->context, "__kmp_nested_nth")
              .cast("kmp_nested_nthreads_t")
              .access("nth")
              .cast("int", 1)
              .getArrayElement(current_nesting_level)
              .castBase(ompd_type_int)
              .getValue(nth_value);

    if (ret != ompd_rc_ok)
      return ret;

    sprintf(temp_value, ",%d", nth_value);
    strcat(nthreads_list_str, temp_value);
  }

  return ompd_rc_ok;
}

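/* Illustrative example (assumed values, not from a real run): with
 * OMP_NUM_THREADS=4,3,2 and the current task at nesting level 0, the string
 * built above would be "4,3,2"; at nesting level 2 only "2" remains. */
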
static ompd_rc_t ompd_get_display_affinity(
    ompd_address_space_handle_t
        *addr_handle, /* IN: handle for the address space */
    ompd_word_t *display_affinity_val /* OUT: display affinity value */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;
  ompd_rc_t ret;

  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ret = TValue(context, "__kmp_display_affinity")
            .castBase("__kmp_display_affinity")
            .getValue(*display_affinity_val);
  return ret;
}

static ompd_rc_t ompd_get_affinity_format(
    ompd_address_space_handle_t *addr_handle, /* IN: address space handle*/
    const char **affinity_format_string /* OUT: affinity format string */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;

  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ompd_rc_t ret;
  ret = TValue(context, "__kmp_affinity_format")
            .cast("char", 1)
            .getString(affinity_format_string);
  return ret;
}

static ompd_rc_t ompd_get_tool_libraries(
    ompd_address_space_handle_t *addr_handle, /* IN: address space handle*/
    const char **tool_libraries_string /* OUT: tool libraries string */
) {
  if (!tool_libraries_string)
    return ompd_rc_bad_input;

  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;

  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ompd_rc_t ret;
  ret = TValue(context, "__kmp_tool_libraries")
            .cast("char", 1)
            .getString(tool_libraries_string);
  if (ret == ompd_rc_unsupported) {
    ret = create_empty_string(tool_libraries_string);
  }
  return ret;
}

static ompd_rc_t ompd_get_default_device(
    ompd_thread_handle_t *thread_handle, /* IN: handle for the thread */
    ompd_word_t *default_device_val /* OUT: default device value */
) {
  if (!thread_handle)
    return ompd_rc_stale_handle;
  if (!thread_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = thread_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks)
    return ompd_rc_callback_error;

  ompd_rc_t ret =
      TValue(context, thread_handle->th) /*__kmp_threads[t]->th*/
          .cast("kmp_base_info_t")
          .access("th_current_task") /*__kmp_threads[t]->th.th_current_task*/
          .cast("kmp_taskdata_t", 1)
          .access("td_icvs") /*__kmp_threads[t]->th.th_current_task->td_icvs*/
          .cast("kmp_internal_control_t", 0)
          /*__kmp_threads[t]->th.th_current_task->td_icvs.default_device*/
          .access("default_device")
          .castBase()
          .getValue(*default_device_val);
  return ret;
}

static ompd_rc_t
ompd_get_tool(ompd_address_space_handle_t
                  *addr_handle, /* IN: handle for the address space */
              ompd_word_t *tool_val /* OUT: tool value */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ompd_rc_t ret;

  ret =
      TValue(context, "__kmp_tool").castBase("__kmp_tool").getValue(*tool_val);
  return ret;
}

static ompd_rc_t ompd_get_tool_verbose_init(
    ompd_address_space_handle_t *addr_handle, /* IN: address space handle*/
    const char **tool_verbose_init_string /* OUT: tool verbose init string */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;

  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ompd_rc_t ret;
  ret = TValue(context, "__kmp_tool_verbose_init")
            .cast("char", 1)
            .getString(tool_verbose_init_string);
  if (ret == ompd_rc_unsupported) {
    ret = create_empty_string(tool_verbose_init_string);
  }
  return ret;
}

static ompd_rc_t ompd_get_level(
    ompd_parallel_handle_t *parallel_handle, /* IN: OpenMP parallel handle */
    ompd_word_t *val /* OUT: nesting level */
) {
  if (!parallel_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = parallel_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;

  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  uint32_t res;

  ompd_rc_t ret = TValue(context, parallel_handle->th)
                      .cast("kmp_base_team_t", 0) /*t*/
                      .access("t_level")          /*t.t_level*/
                      .castBase()
                      .getValue(res);
  *val = res;
  return ret;
}

static ompd_rc_t ompd_get_active_level(
    ompd_parallel_handle_t *parallel_handle, /* IN: OpenMP parallel handle */
    ompd_word_t *val /* OUT: active nesting level */
) {
  if (!parallel_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = parallel_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;

  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  uint32_t res;

  ompd_rc_t ret = TValue(context, parallel_handle->th)
                      .cast("kmp_base_team_t", 0) /*t*/
                      .access("t_active_level")   /*t.t_active_level*/
                      .castBase()
                      .getValue(res);
  *val = res;
  return ret;
}

static ompd_rc_t
ompd_get_num_procs(ompd_address_space_handle_t
                       *addr_handle, /* IN: handle for the address space */
                   ompd_word_t *val /* OUT: number of available processors */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  if (!val)
    return ompd_rc_bad_input;
  ompd_rc_t ret;

  int nth;
  ret = TValue(context, "__kmp_avail_proc")
            .castBase("__kmp_avail_proc")
            .getValue(nth);
  *val = nth;
  return ret;
}

static ompd_rc_t ompd_get_thread_limit(
    ompd_task_handle_t *task_handle, /* IN: OpenMP task handle*/
    ompd_word_t *val /* OUT: max number of threads */
) {
  if (!task_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = task_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret = TValue(context, task_handle->th)
                      .cast("kmp_taskdata_t") // td
                      .access("td_icvs")      // td->td_icvs
                      .cast("kmp_internal_control_t", 0)
                      .access("thread_limit") // td->td_icvs.thread_limit
                      .castBase()
                      .getValue(*val);

  return ret;
}

static ompd_rc_t ompd_get_thread_num(
    ompd_thread_handle_t *thread_handle, /* IN: OpenMP thread handle*/
    ompd_word_t *val /* OUT: number of the thread within the team */
) {
  if (!thread_handle)
    return ompd_rc_stale_handle;
  if (!thread_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = thread_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret =
      TValue(context, thread_handle->th) /*__kmp_threads[t]->th*/
          .cast("kmp_base_info_t")
          .access("th_info") /*__kmp_threads[t]->th.th_info*/
          .cast("kmp_desc_t")
          .access("ds") /*__kmp_threads[t]->th.th_info.ds*/
          .cast("kmp_desc_base_t")
          .access("ds_tid") /*__kmp_threads[t]->th.th_info.ds.ds_tid*/
          .castBase()
          .getValue(*val);
  return ret;
}

static ompd_rc_t
ompd_in_final(ompd_task_handle_t *task_handle, /* IN: OpenMP task handle*/
              ompd_word_t *val /* OUT: 1 if the task is final, 0 otherwise */
) {
  if (!task_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = task_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret = TValue(context, task_handle->th)
                      .cast("kmp_taskdata_t") // td
                      .access("td_flags")     // td->td_flags
                      .cast("kmp_tasking_flags_t")
                      .check("final", val); // td->td_flags.final

  return ret;
}

static ompd_rc_t ompd_get_max_active_levels(
    ompd_task_handle_t *task_handle, /* IN: OpenMP task handle*/
    ompd_word_t *val /* OUT: max-active-levels-var value */
) {
  if (!task_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = task_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret =
      TValue(context, task_handle->th)
          .cast("kmp_taskdata_t") // td
          .access("td_icvs")      // td->td_icvs
          .cast("kmp_internal_control_t", 0)
          .access("max_active_levels") // td->td_icvs.max_active_levels
          .castBase()
          .getValue(*val);

  return ret;
}

static ompd_rc_t ompd_get_run_schedule(
    ompd_task_handle_t *task_handle, /* IN: OpenMP task handle*/
    const char **run_sched_string /* OUT: Run Schedule String
                                     consisting of kind and modifier */
) {
  if (!task_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = task_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  int kind;

  TValue sched = TValue(context, task_handle->th)
                     .cast("kmp_taskdata_t") // td
                     .access("td_icvs")      // td->td_icvs
                     .cast("kmp_internal_control_t", 0)
                     .access("sched") // td->td_icvs.sched
                     .cast("kmp_r_sched_t", 0);

  ompd_rc_t ret = sched
                      .access("r_sched_type") // td->td_icvs.sched.r_sched_type
                      .castBase()
                      .getValue(kind);
  if (ret != ompd_rc_ok) {
    return ret;
  }
  int chunk = 0;
  ret = sched
            .access("chunk") // td->td_icvs.sched.chunk
            .castBase()
            .getValue(chunk);
  if (ret != ompd_rc_ok) {
    return ret;
  }
  char *run_sched_var_string;
  ret = callbacks->alloc_memory(100, (void **)&run_sched_var_string);
  if (ret != ompd_rc_ok) {
    return ret;
  }
  run_sched_var_string[0] = '\0';
  if (SCHEDULE_HAS_MONOTONIC(kind)) {
    strcpy(run_sched_var_string, "monotonic:");
  } else if (SCHEDULE_HAS_NONMONOTONIC(kind)) {
    strcpy(run_sched_var_string, "nonmonotonic:");
  }

  bool static_unchunked = false;
  switch (SCHEDULE_WITHOUT_MODIFIERS(kind)) {
  case kmp_sch_static:
  case kmp_sch_static_greedy:
  case kmp_sch_static_balanced:
    static_unchunked = true;
    strcat(run_sched_var_string, "static");
    break;
  case kmp_sch_static_chunked:
    strcat(run_sched_var_string, "static");
    break;
  case kmp_sch_dynamic_chunked:
    strcat(run_sched_var_string, "dynamic");
    break;
  case kmp_sch_guided_chunked:
  case kmp_sch_guided_iterative_chunked:
  case kmp_sch_guided_analytical_chunked:
    strcat(run_sched_var_string, "guided");
    break;
  case kmp_sch_auto:
    strcat(run_sched_var_string, "auto");
    break;
  case kmp_sch_trapezoidal:
    strcat(run_sched_var_string, "trapezoidal");
    break;
  case kmp_sch_static_steal:
    strcat(run_sched_var_string, "static_steal");
    break;
  default:
    ret = callbacks->free_memory((void *)(run_sched_var_string));
    if (ret != ompd_rc_ok) {
      return ret;
    }
    ret = create_empty_string(run_sched_string);
    return ret;
  }

  if (static_unchunked == true) {
    // To be in sync with what OMPT returns.
    // Chunk was not set. Shown with a zero value.
    chunk = 0;
  }

  char temp_str[16];
  sprintf(temp_str, ",%d", chunk);
  strcat(run_sched_var_string, temp_str);
  *run_sched_string = run_sched_var_string;
  return ret;
}

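/* Illustrative examples of the strings built above (assumed, not from a real
 * run): OMP_SCHEDULE="dynamic,4" yields "dynamic,4", and
 * OMP_SCHEDULE="monotonic:static" yields "monotonic:static,0" (the chunk is
 * shown as zero when it was not set, matching OMPT). */
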
/* Helper routine for the ompd_get_proc_bind routines */
static ompd_rc_t ompd_get_proc_bind_aux(ompd_task_handle_t *task_handle,
                                        uint32_t *used,
                                        uint32_t *current_nesting_level,
                                        uint32_t *proc_bind) {
  if (!task_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = task_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret = TValue(context, "__kmp_nested_proc_bind")
                      .cast("kmp_nested_proc_bind_t")
                      .access("used")
                      .castBase(ompd_type_int)
                      .getValue(*used);
  if (ret != ompd_rc_ok)
    return ret;

  TValue taskdata = TValue(context, task_handle->th) /* td */
                        .cast("kmp_taskdata_t");

  ret = taskdata
            .access("td_team") /* td->td_team*/
            .cast("kmp_team_p", 1)
            .access("t") /* td->td_team->t*/
            .cast("kmp_base_team_t", 0) /*t*/
            .access("t_level")          /*t.t_level*/
            .castBase(ompd_type_int)
            .getValue(*current_nesting_level);
  if (ret != ompd_rc_ok)
    return ret;

  ret = taskdata
            .access("td_icvs") /* td->td_icvs */
            .cast("kmp_internal_control_t", 0)
            .access("proc_bind") /* td->td_icvs.proc_bind */
            .castBase()
            .getValue(*proc_bind);
  return ret;
}

static ompd_rc_t
ompd_get_proc_bind(ompd_task_handle_t *task_handle, /* IN: OpenMP task handle */
                   ompd_word_t *bind /* OUT: Kind of proc-binding */
) {
  uint32_t used;
  uint32_t proc_bind;
  uint32_t current_nesting_level;

  ompd_rc_t ret;
  ret = ompd_get_proc_bind_aux(task_handle, &used, &current_nesting_level,
                               &proc_bind);
  if (ret != ompd_rc_ok)
    return ret;

  *bind = proc_bind;
  /* If bind-var is a list with more than one element, then the value of
     this ICV cannot be represented by an integer type. In this case,
     ompd_rc_incomplete is returned. The tool can check the return value and
     can choose to invoke ompd_get_icv_string_from_scope() if needed. */
  if (current_nesting_level < used - 1) {
    return ompd_rc_incomplete;
  }
  return ompd_rc_ok;
}

static ompd_rc_t ompd_get_proc_bind(
    ompd_task_handle_t *task_handle, /* IN: OpenMP task handle */
    const char **proc_bind_list_string /* OUT: string list of comma separated
                                          bind-var values */
) {
  uint32_t used;
  uint32_t proc_bind;
  uint32_t current_nesting_level;

  ompd_rc_t ret;
  ret = ompd_get_proc_bind_aux(task_handle, &used, &current_nesting_level,
                               &proc_bind);
  if (ret != ompd_rc_ok)
    return ret;

  uint32_t num_list_elems;
  if (used == 0 || current_nesting_level >= used) {
    num_list_elems = 1;
  } else {
    num_list_elems = used - current_nesting_level;
  }
  size_t buffer_size =
      16 /* digits per element including the comma separator */
          * num_list_elems +
      1; /* string terminator NULL */
  char *proc_bind_list_str;
  ret = callbacks->alloc_memory(buffer_size, (void **)&proc_bind_list_str);
  if (ret != ompd_rc_ok)
    return ret;

  /* The bind-var list would be:
     [td->td_icvs.proc_bind,
      __kmp_nested_proc_bind.bind_types[current_nesting_level + 1],
      __kmp_nested_proc_bind.bind_types[current_nesting_level + 2],
      ...,
      __kmp_nested_proc_bind.bind_types[used - 1]]*/

  sprintf(proc_bind_list_str, "%d", proc_bind);
  *proc_bind_list_string = proc_bind_list_str;
  if (num_list_elems == 1) {
    return ompd_rc_ok;
  }

  char temp_value[16];
  uint32_t bind_types_value;

  for (current_nesting_level++; /* the list element for this nesting
                                 * level has already been accounted for
                                 * by proc_bind */
       current_nesting_level < used; current_nesting_level++) {

    ret = TValue(task_handle->ah->context, "__kmp_nested_proc_bind")
              .cast("kmp_nested_proc_bind_t")
              .access("bind_types")
              .cast("int", 1)
              .getArrayElement(current_nesting_level)
              .castBase(ompd_type_int)
              .getValue(bind_types_value);

    if (ret != ompd_rc_ok)
      return ret;

    sprintf(temp_value, ",%d", bind_types_value);
    strcat(proc_bind_list_str, temp_value);
  }

  return ompd_rc_ok;
}

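/* Illustrative example (assumed values): with OMP_PROC_BIND=spread,close at
 * nesting level 0, the list has two elements and the string holds their
 * numeric kmp_proc_bind_t encodings, e.g. "4,3". */
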
static ompd_rc_t
ompd_is_implicit(ompd_task_handle_t *task_handle, /* IN: OpenMP task handle*/
                 ompd_word_t *val /* OUT: 1 if implicit, 0 if explicit */
) {
  if (!task_handle)
    return ompd_rc_stale_handle;
  if (!task_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = task_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret = TValue(context, task_handle->th)
                      .cast("kmp_taskdata_t") // td
                      .access("td_flags")     // td->td_flags
                      .cast("kmp_tasking_flags_t")
                      .check("tasktype", val); // td->td_flags.tasktype
  *val ^= 1; // tasktype: explicit = 1, implicit = 0 => invert the value
  return ret;
}

ompd_rc_t ompd_get_num_threads(
    ompd_parallel_handle_t *parallel_handle, /* IN: OpenMP parallel handle */
    ompd_word_t *val /* OUT: number of threads */
) {
  if (!parallel_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = parallel_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret = ompd_rc_ok;
  if (parallel_handle->lwt.address != 0) {
    *val = 1; // a lightweight (serialized) team has exactly one thread
  } else {
    uint32_t res;
    ret = TValue(context, parallel_handle->th)
              .cast("kmp_base_team_t", 0) /*t*/
              .access("t_nproc")          /*t.t_nproc*/
              .castBase()
              .getValue(res);
    *val = res;
  }
  return ret;
}

ompd_rc_t ompd_get_icv_from_scope(void *handle, ompd_scope_t scope,
                                  ompd_icv_id_t icv_id,
                                  ompd_word_t *icv_value) {
  if (!handle) {
    return ompd_rc_stale_handle;
  }
  if (icv_id >= ompd_icv_after_last_icv || icv_id == 0) {
    return ompd_rc_bad_input;
  }
  if (scope != ompd_icv_scope_values[icv_id]) {
    return ompd_rc_bad_input;
  }

  ompd_device_t device_kind;

  switch (scope) {
  case ompd_scope_thread:
    device_kind = ((ompd_thread_handle_t *)handle)->ah->kind;
    break;
  case ompd_scope_parallel:
    device_kind = ((ompd_parallel_handle_t *)handle)->ah->kind;
    break;
  case ompd_scope_address_space:
    device_kind = ((ompd_address_space_handle_t *)handle)->kind;
    break;
  case ompd_scope_task:
    device_kind = ((ompd_task_handle_t *)handle)->ah->kind;
    break;
  default:
    return ompd_rc_bad_input;
  }

  if (device_kind == OMPD_DEVICE_KIND_HOST) {
    switch (icv_id) {
    case ompd_icv_dyn_var:
      return ompd_get_dynamic((ompd_thread_handle_t *)handle, icv_value);
    case ompd_icv_run_sched_var:
      return ompd_rc_incompatible;
    case ompd_icv_stacksize_var:
      return ompd_get_stacksize((ompd_address_space_handle_t *)handle,
                                icv_value);
    case ompd_icv_cancel_var:
      return ompd_get_cancellation((ompd_address_space_handle_t *)handle,
                                   icv_value);
    case ompd_icv_max_task_priority_var:
      return ompd_get_max_task_priority((ompd_address_space_handle_t *)handle,
                                        icv_value);
    case ompd_icv_debug_var:
      return ompd_get_debug((ompd_address_space_handle_t *)handle, icv_value);
    case ompd_icv_nthreads_var:
      return ompd_get_nthreads((ompd_thread_handle_t *)handle, icv_value);
    case ompd_icv_display_affinity_var:
      return ompd_get_display_affinity((ompd_address_space_handle_t *)handle,
                                       icv_value);
    case ompd_icv_affinity_format_var:
      return ompd_rc_incompatible;
    case ompd_icv_tool_libraries_var:
      return ompd_rc_incompatible;
    case ompd_icv_default_device_var:
      return ompd_get_default_device((ompd_thread_handle_t *)handle,
                                     icv_value);
    case ompd_icv_tool_var:
      return ompd_get_tool((ompd_address_space_handle_t *)handle, icv_value);
    case ompd_icv_tool_verbose_init_var:
      return ompd_rc_incompatible;
    case ompd_icv_levels_var:
      return ompd_get_level((ompd_parallel_handle_t *)handle, icv_value);
    case ompd_icv_active_levels_var:
      return ompd_get_active_level((ompd_parallel_handle_t *)handle,
                                   icv_value);
    case ompd_icv_thread_limit_var:
      return ompd_get_thread_limit((ompd_task_handle_t *)handle, icv_value);
    case ompd_icv_max_active_levels_var:
      return ompd_get_max_active_levels((ompd_task_handle_t *)handle,
                                        icv_value);
    case ompd_icv_bind_var:
      return ompd_get_proc_bind((ompd_task_handle_t *)handle, icv_value);
    case ompd_icv_num_procs_var:
    case ompd_icv_ompd_num_procs_var:
      return ompd_get_num_procs((ompd_address_space_handle_t *)handle,
                                icv_value);
    case ompd_icv_thread_num_var:
    case ompd_icv_ompd_thread_num_var:
      return ompd_get_thread_num((ompd_thread_handle_t *)handle, icv_value);
    case ompd_icv_final_var:
    case ompd_icv_ompd_final_var:
    case ompd_icv_ompd_final_task_var:
      return ompd_in_final((ompd_task_handle_t *)handle, icv_value);
    case ompd_icv_implicit_var:
    case ompd_icv_ompd_implicit_var:
    case ompd_icv_ompd_implicit_task_var:
      return ompd_is_implicit((ompd_task_handle_t *)handle, icv_value);
    case ompd_icv_team_size_var:
    case ompd_icv_ompd_team_size_var:
      return ompd_get_num_threads((ompd_parallel_handle_t *)handle, icv_value);
    default:
      return ompd_rc_unsupported;
    }
  }
  return ompd_rc_unsupported;
}

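/* Illustrative (hypothetical) tool-side call, with `levels_var_id` previously
 * obtained from ompd_enumerate_icvs for the parallel scope:
 *
 *   ompd_word_t level;
 *   ompd_rc_t rc = ompd_get_icv_from_scope(parallel_handle,
 *                                          ompd_scope_parallel,
 *                                          levels_var_id, &level);
 */
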
ompd_rc_t ompd_get_icv_string_from_scope(void *handle, ompd_scope_t scope,
                                         ompd_icv_id_t icv_id,
                                         const char **icv_string) {
  if (!handle) {
    return ompd_rc_stale_handle;
  }
  if (icv_id >= ompd_icv_after_last_icv || icv_id == 0) {
    return ompd_rc_bad_input;
  }
  if (scope != ompd_icv_scope_values[icv_id]) {
    return ompd_rc_bad_input;
  }

  ompd_device_t device_kind;

  switch (scope) {
  case ompd_scope_thread:
    device_kind = ((ompd_thread_handle_t *)handle)->ah->kind;
    break;
  case ompd_scope_parallel:
    device_kind = ((ompd_parallel_handle_t *)handle)->ah->kind;
    break;
  case ompd_scope_address_space:
    device_kind = ((ompd_address_space_handle_t *)handle)->kind;
    break;
  case ompd_scope_task:
    device_kind = ((ompd_task_handle_t *)handle)->ah->kind;
    break;
  default:
    return ompd_rc_bad_input;
  }

  if (device_kind == OMPD_DEVICE_KIND_HOST) {
    switch (icv_id) {
    case ompd_icv_run_sched_var:
      return ompd_get_run_schedule((ompd_task_handle_t *)handle, icv_string);
    case ompd_icv_nthreads_var:
      return ompd_get_nthreads((ompd_thread_handle_t *)handle, icv_string);
    case ompd_icv_bind_var:
      return ompd_get_proc_bind((ompd_task_handle_t *)handle, icv_string);
    case ompd_icv_affinity_format_var:
      return ompd_get_affinity_format((ompd_address_space_handle_t *)handle,
                                      icv_string);
    case ompd_icv_tool_libraries_var:
      return ompd_get_tool_libraries((ompd_address_space_handle_t *)handle,
                                     icv_string);
    case ompd_icv_tool_verbose_init_var:
      return ompd_get_tool_verbose_init((ompd_address_space_handle_t *)handle,
                                        icv_string);
    default:
      return ompd_rc_unsupported;
    }
  }
  return ompd_rc_unsupported;
}

static ompd_rc_t __ompd_get_tool_data(TValue &dataValue, ompd_word_t *value,
                                      ompd_address_t *ptr) {
  ompd_rc_t ret = dataValue.getError();
  if (ret != ompd_rc_ok)
    return ret;
  ret = dataValue.access("value").castBase().getValue(*value);
  if (ret != ompd_rc_ok)
    return ret;
  ptr->segment = OMPD_SEGMENT_UNSPECIFIED;
  ret = dataValue.access("ptr").castBase().getValue(ptr->address);
  return ret;
}

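/* Note: ompt_data_t is a union of an integer (`value`) and a pointer (`ptr`),
 * so both representations are read out above; which one is meaningful depends
 * on what the first-party tool stored there. */
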
ompd_rc_t ompd_get_task_data(ompd_task_handle_t *task_handle,
                             ompd_word_t *value, ompd_address_t *ptr) {
  ompd_address_space_context_t *context = task_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  TValue dataValue;
  if (task_handle->lwt.address) {
    dataValue = TValue(context, task_handle->lwt)
                    .cast("ompt_lw_taskteam_t") /*lwt*/
                    .access("ompt_task_info") // lwt->ompt_task_info
                    .cast("ompt_task_info_t")
                    .access("task_data") // lwt->ompt_task_info.task_data
                    .cast("ompt_data_t");
  } else {
    dataValue = TValue(context, task_handle->th)
                    .cast("kmp_taskdata_t") /*td*/
                    .access("ompt_task_info") // td->ompt_task_info
                    .cast("ompt_task_info_t")
                    .access("task_data") // td->ompt_task_info.task_data
                    .cast("ompt_data_t");
  }
  return __ompd_get_tool_data(dataValue, value, ptr);
}

ompd_rc_t ompd_get_parallel_data(ompd_parallel_handle_t *parallel_handle,
                                 ompd_word_t *value, ompd_address_t *ptr) {
  ompd_address_space_context_t *context = parallel_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  TValue dataValue;
  if (parallel_handle->lwt.address) {
    dataValue =
        TValue(context, parallel_handle->lwt)
            .cast("ompt_lw_taskteam_t") /*lwt*/
            .access("ompt_team_info") // lwt->ompt_team_info
            .cast("ompt_team_info_t")
            .access("parallel_data") // lwt->ompt_team_info.parallel_data
            .cast("ompt_data_t");
  } else {
    dataValue = TValue(context, parallel_handle->th)
                    .cast("kmp_base_team_t") /*t*/
                    .access("ompt_team_info") // t->ompt_team_info
                    .cast("ompt_team_info_t")
                    .access("parallel_data") // t->ompt_team_info.parallel_data
                    .cast("ompt_data_t");
  }
  return __ompd_get_tool_data(dataValue, value, ptr);
}

ompd_rc_t ompd_get_thread_data(ompd_thread_handle_t *thread_handle,
                               ompd_word_t *value, ompd_address_t *ptr) {
  ompd_address_space_context_t *context = thread_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  TValue dataValue =
      TValue(context, thread_handle->th)
          .cast("kmp_base_info_t") /*th*/
          .access("ompt_thread_info") // th->ompt_thread_info
          .cast("ompt_thread_info_t")
          .access("thread_data") // th->ompt_thread_info.thread_data
          .cast("ompt_data_t");
  return __ompd_get_tool_data(dataValue, value, ptr);
}

ompd_rc_t ompd_get_tool_data(void *handle, ompd_scope_t scope,
                             ompd_word_t *value, ompd_address_t *ptr) {
  if (!handle) {
    return ompd_rc_stale_handle;
  }

  ompd_device_t device_kind;

  switch (scope) {
  case ompd_scope_thread:
    device_kind = ((ompd_thread_handle_t *)handle)->ah->kind;
    break;
  case ompd_scope_parallel:
    device_kind = ((ompd_parallel_handle_t *)handle)->ah->kind;
    break;
  case ompd_scope_task:
    device_kind = ((ompd_task_handle_t *)handle)->ah->kind;
    break;
  default:
    return ompd_rc_bad_input;
  }

  if (device_kind == OMPD_DEVICE_KIND_HOST) {
    switch (scope) {
    case ompd_scope_thread:
      return ompd_get_thread_data((ompd_thread_handle_t *)handle, value, ptr);
    case ompd_scope_parallel:
      return ompd_get_parallel_data((ompd_parallel_handle_t *)handle, value,
                                    ptr);
    case ompd_scope_task:
      return ompd_get_task_data((ompd_task_handle_t *)handle, value, ptr);
    default:
      return ompd_rc_unsupported;
    }
  }
  return ompd_rc_unsupported;
}