/* ompt-specific.h - header of OMPT internal functions implementation */
5 //===----------------------------------------------------------------------===//
7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8 // See https://llvm.org/LICENSE.txt for license information.
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
11 //===----------------------------------------------------------------------===//
13 #ifndef OMPT_SPECIFIC_H
14 #define OMPT_SPECIFIC_H
19 /*****************************************************************************
20 * forward declarations
21 ****************************************************************************/
23 /// Entrypoint used by libomptarget to register callbacks in libomp, if not
25 void __ompt_force_initialization();
27 void __ompt_team_assign_id(kmp_team_t
*team
, ompt_data_t ompt_pid
);
28 void __ompt_thread_assign_wait_id(void *variable
);
30 void __ompt_lw_taskteam_init(ompt_lw_taskteam_t
*lwt
, kmp_info_t
*thr
, int gtid
,
31 ompt_data_t
*ompt_pid
, void *codeptr
);
33 void __ompt_lw_taskteam_link(ompt_lw_taskteam_t
*lwt
, kmp_info_t
*thr
,
34 int on_heap
, bool always
= false);
36 void __ompt_lw_taskteam_unlink(kmp_info_t
*thr
);
38 ompt_team_info_t
*__ompt_get_teaminfo(int depth
, int *size
);
40 ompt_data_t
*__ompt_get_task_data();
42 ompt_data_t
*__ompt_get_target_task_data();
44 ompt_task_info_t
*__ompt_get_task_info_object(int depth
);
46 int __ompt_get_parallel_info_internal(int ancestor_level
,
47 ompt_data_t
**parallel_data
,
50 int __ompt_get_task_info_internal(int ancestor_level
, int *type
,
51 ompt_data_t
**task_data
,
52 ompt_frame_t
**task_frame
,
53 ompt_data_t
**parallel_data
, int *thread_num
);
55 ompt_data_t
*__ompt_get_thread_data_internal();
59 static uint64_t __ompt_get_get_unique_id_internal();
62 ompt_sync_region_t
__ompt_get_barrier_kind(enum barrier_type
, kmp_info_t
*);
64 /*****************************************************************************
66 ****************************************************************************/
68 #define OMPT_CUR_TASK_INFO(thr) (&((thr)->th.th_current_task->ompt_task_info))
69 #define OMPT_CUR_TASK_DATA(thr) \
70 (&((thr)->th.th_current_task->ompt_task_info.task_data))
71 #define OMPT_CUR_TEAM_INFO(thr) (&((thr)->th.th_team->t.ompt_team_info))
72 #define OMPT_CUR_TEAM_DATA(thr) \
73 (&((thr)->th.th_team->t.ompt_team_info.parallel_data))
75 #define OMPT_HAVE_WEAK_ATTRIBUTE KMP_HAVE_WEAK_ATTRIBUTE
76 #define OMPT_HAVE_PSAPI KMP_HAVE_PSAPI
77 #define OMPT_STR_MATCH(haystack, needle) __kmp_str_match(haystack, 0, needle)
79 inline void *__ompt_load_return_address(int gtid
) {
80 kmp_info_t
*thr
= __kmp_threads
[gtid
];
81 void *return_address
= thr
->th
.ompt_thread_info
.return_address
;
82 thr
->th
.ompt_thread_info
.return_address
= NULL
;
83 return return_address
;
86 /*#define OMPT_STORE_RETURN_ADDRESS(gtid) \
87 if (ompt_enabled.enabled && gtid >= 0 && __kmp_threads[gtid] && \
88 !__kmp_threads[gtid]->th.ompt_thread_info.return_address) \
89 __kmp_threads[gtid]->th.ompt_thread_info.return_address = \
90 __builtin_return_address(0)*/
91 #define OMPT_STORE_RETURN_ADDRESS(gtid) \
92 OmptReturnAddressGuard ReturnAddressGuard{gtid, __builtin_return_address(0)};
93 #define OMPT_LOAD_RETURN_ADDRESS(gtid) __ompt_load_return_address(gtid)
94 #define OMPT_LOAD_OR_GET_RETURN_ADDRESS(gtid) \
95 ((ompt_enabled.enabled && gtid >= 0 && __kmp_threads[gtid] && \
96 __kmp_threads[gtid]->th.ompt_thread_info.return_address) \
97 ? __ompt_load_return_address(gtid) \
98 : __builtin_return_address(0))
100 #define OMPT_GET_DISPATCH_CHUNK(chunk, lb, ub, incr) \
103 chunk.start = static_cast<uint64_t>(lb); \
104 chunk.iterations = static_cast<uint64_t>(((ub) - (lb)) / (incr) + 1); \
106 chunk.start = static_cast<uint64_t>(ub); \
107 chunk.iterations = static_cast<uint64_t>(((lb) - (ub)) / -(incr) + 1); \
111 //******************************************************************************
113 //******************************************************************************
115 inline kmp_info_t
*ompt_get_thread_gtid(int gtid
) {
116 return (gtid
>= 0) ? __kmp_thread_from_gtid(gtid
) : NULL
;
119 inline kmp_info_t
*ompt_get_thread() {
120 int gtid
= __kmp_get_gtid();
121 return ompt_get_thread_gtid(gtid
);
124 inline void ompt_set_thread_state(kmp_info_t
*thread
, ompt_state_t state
) {
126 thread
->th
.ompt_thread_info
.state
= state
;
129 inline const char *ompt_get_runtime_version() {
130 return &__kmp_version_lib_ver
[KMP_VERSION_MAGIC_LEN
];
133 class OmptReturnAddressGuard
{
135 bool SetAddress
{false};
139 OmptReturnAddressGuard(int Gtid
, void *ReturnAddress
) : Gtid(Gtid
) {
140 if (ompt_enabled
.enabled
&& Gtid
>= 0 && __kmp_threads
[Gtid
] &&
141 !__kmp_threads
[Gtid
]->th
.ompt_thread_info
.return_address
) {
143 __kmp_threads
[Gtid
]->th
.ompt_thread_info
.return_address
= ReturnAddress
;
146 ~OmptReturnAddressGuard() {
148 __kmp_threads
[Gtid
]->th
.ompt_thread_info
.return_address
= NULL
;
152 #endif // OMPT_SUPPORT
154 // macros providing the OMPT callbacks for reduction clause
155 #if OMPT_SUPPORT && OMPT_OPTIONAL
156 #define OMPT_REDUCTION_DECL(this_thr, gtid) \
157 ompt_data_t *my_task_data = OMPT_CUR_TASK_DATA(this_thr); \
158 ompt_data_t *my_parallel_data = OMPT_CUR_TEAM_DATA(this_thr); \
159 void *return_address = OMPT_LOAD_RETURN_ADDRESS(gtid);
160 #define OMPT_REDUCTION_BEGIN \
161 if (ompt_enabled.enabled && ompt_enabled.ompt_callback_reduction) { \
162 ompt_callbacks.ompt_callback(ompt_callback_reduction)( \
163 ompt_sync_region_reduction, ompt_scope_begin, my_parallel_data, \
164 my_task_data, return_address); \
166 #define OMPT_REDUCTION_END \
167 if (ompt_enabled.enabled && ompt_enabled.ompt_callback_reduction) { \
168 ompt_callbacks.ompt_callback(ompt_callback_reduction)( \
169 ompt_sync_region_reduction, ompt_scope_end, my_parallel_data, \
170 my_task_data, return_address); \
172 #else // OMPT_SUPPORT && OMPT_OPTIONAL
173 #define OMPT_REDUCTION_DECL(this_thr, gtid)
174 #define OMPT_REDUCTION_BEGIN
175 #define OMPT_REDUCTION_END
176 #endif // ! OMPT_SUPPORT && OMPT_OPTIONAL