/*
 * kmp_taskdeps.h
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef KMP_TASKDEPS_H
#define KMP_TASKDEPS_H

#include "kmp.h"

#define KMP_ACQUIRE_DEPNODE(gtid, n) __kmp_acquire_lock(&(n)->dn.lock, (gtid))
#define KMP_RELEASE_DEPNODE(gtid, n) __kmp_release_lock(&(n)->dn.lock, (gtid))
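
// __kmp_node_deref: drop one reference to a dependence node and free the node
// once the last reference is gone. Nodes are reference-counted because they
// are shared between the owning task and the successor lists that point to
// them.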
static inline void __kmp_node_deref(kmp_info_t *thread, kmp_depnode_t *node) {
  if (!node)
    return;

  kmp_int32 n = KMP_ATOMIC_DEC(&node->dn.nrefs) - 1;
  KMP_DEBUG_ASSERT(n >= 0);
  if (n == 0) {
#if USE_ITT_BUILD && USE_ITT_NOTIFY
    __itt_sync_destroy(node);
#endif
    KMP_ASSERT(node->dn.nrefs == 0);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread, node);
#else
    __kmp_thread_free(thread, node);
#endif
  }
}
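
// __kmp_depnode_list_free: free a dependence-node list, dropping the
// reference each element holds on its node before freeing the element.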
static inline void __kmp_depnode_list_free(kmp_info_t *thread,
                                           kmp_depnode_list *list) {
  kmp_depnode_list *next;

  for (; list; list = next) {
    next = list->next;

    __kmp_node_deref(thread, list->node);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread, list);
#else
    __kmp_thread_free(thread, list);
#endif
  }
}
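
// __kmp_dephash_free_entries: free everything reachable from the dependence
// hash buckets: the last_set/prev_set lists, the last_out node, and any
// mutexinoutset lock allocated for an entry.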
static inline void __kmp_dephash_free_entries(kmp_info_t *thread,
                                              kmp_dephash_t *h) {
  for (size_t i = 0; i < h->size; i++) {
    if (h->buckets[i]) {
      kmp_dephash_entry_t *next;
      for (kmp_dephash_entry_t *entry = h->buckets[i]; entry; entry = next) {
        next = entry->next_in_bucket;
        __kmp_depnode_list_free(thread, entry->last_set);
        __kmp_depnode_list_free(thread, entry->prev_set);
        __kmp_node_deref(thread, entry->last_out);
        if (entry->mtx_lock) {
          __kmp_destroy_lock(entry->mtx_lock);
          __kmp_free(entry->mtx_lock);
        }
#if USE_FAST_MEMORY
        __kmp_fast_free(thread, entry);
#else
        __kmp_thread_free(thread, entry);
#endif
      }
      h->buckets[i] = 0;
    }
  }
  __kmp_node_deref(thread, h->last_all);
}
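
// __kmp_dephash_free: free the hash entries, then the hash table itself.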
static inline void __kmp_dephash_free(kmp_info_t *thread, kmp_dephash_t *h) {
  __kmp_dephash_free_entries(thread, h);
#if USE_FAST_MEMORY
  __kmp_fast_free(thread, h);
#else
  __kmp_thread_free(thread, h);
#endif
}
extern void __kmpc_give_task(kmp_task_t *ptask, kmp_int32 start);
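
// __kmp_release_deps: run when a task with dependences completes. It releases
// any mutexinoutset locks the task held, frees its dependence hash, marks the
// depnode finished, and decrements npredecessors on every successor,
// scheduling each successor whose count reaches zero. Illustrative user-level
// trigger (not part of this file):
//
//   #pragma omp task depend(out : x) // predecessor; on completion,
//                                    // __kmp_release_deps runs
//   #pragma omp task depend(in : x)  // successor; becomes schedulable when
//                                    // its npredecessors count hits zero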
static inline void __kmp_release_deps(kmp_int32 gtid, kmp_taskdata_t *task) {
#if OMPX_TASKGRAPH
  if (task->is_taskgraph && !(__kmp_tdg_is_recording(task->tdg->tdg_status))) {
    kmp_node_info_t *TaskInfo = &(task->tdg->record_map[task->td_task_id]);

    for (int i = 0; i < TaskInfo->nsuccessors; i++) {
      kmp_int32 successorNumber = TaskInfo->successors[i];
      kmp_node_info_t *successor = &(task->tdg->record_map[successorNumber]);
      kmp_int32 npredecessors =
          KMP_ATOMIC_DEC(&successor->npredecessors_counter) - 1;
      if (successor->task != nullptr && npredecessors == 0) {
        __kmp_omp_task(gtid, successor->task, false);
      }
    }
  }
#endif

  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_depnode_t *node = task->td_depnode;

  // Check mutexinoutset dependencies, release locks
  if (UNLIKELY(node && (node->dn.mtx_num_locks < 0))) {
    // negative num_locks means all locks were acquired
    node->dn.mtx_num_locks = -node->dn.mtx_num_locks;
    for (int i = node->dn.mtx_num_locks - 1; i >= 0; --i) {
      KMP_DEBUG_ASSERT(node->dn.mtx_locks[i] != NULL);
      __kmp_release_lock(node->dn.mtx_locks[i], gtid);
    }
  }

  if (task->td_dephash) { // do we need to cleanup dephash?
    KA_TRACE(
        40,
        ("__kmp_release_deps: T#%d freeing dependencies hash of task %p.\n",
         gtid, task));
    __kmp_dephash_free(thread, task->td_dephash);
    task->td_dephash = NULL;
  }

  if (!node)
    return;

  KA_TRACE(20, ("__kmp_release_deps: T#%d notifying successors of task %p.\n",
                gtid, task));

  KMP_ACQUIRE_DEPNODE(gtid, node);
#if OMPX_TASKGRAPH
  if (!task->is_taskgraph ||
      (task->is_taskgraph && !__kmp_tdg_is_recording(task->tdg->tdg_status)))
#endif
    node->dn.task =
        NULL; // mark this task as finished, so no new dependencies are generated
  KMP_RELEASE_DEPNODE(gtid, node);

  kmp_depnode_list_t *next;
  kmp_taskdata_t *next_taskdata;
  for (kmp_depnode_list_t *p = node->dn.successors; p; p = next) {
    kmp_depnode_t *successor = p->node;
#if USE_ITT_BUILD && USE_ITT_NOTIFY
    __itt_sync_releasing(successor);
#endif
    kmp_int32 npredecessors = KMP_ATOMIC_DEC(&successor->dn.npredecessors) - 1;

    // successor task can be NULL for wait_depends or because deps are still
    // being processed
    if (npredecessors == 0) {
#if USE_ITT_BUILD && USE_ITT_NOTIFY
      __itt_sync_acquired(successor);
#endif
      KMP_MB();
      if (successor->dn.task) {
        KA_TRACE(20, ("__kmp_release_deps: T#%d successor %p of %p scheduled "
                      "for execution.\n",
                      gtid, successor->dn.task, task));
        // If a regular task depending on a hidden helper task, when the
        // hidden helper task is done, the regular task should be executed by
        // its encountering team.
        if (KMP_HIDDEN_HELPER_THREAD(gtid)) {
          // Hidden helper thread can only execute hidden helper tasks
          KMP_ASSERT(task->td_flags.hidden_helper);
          next_taskdata = KMP_TASK_TO_TASKDATA(successor->dn.task);
          // If the dependent task is a regular task, we need to push to its
          // encountering thread's queue; otherwise, it can be pushed to its own
          // queue.
          if (!next_taskdata->td_flags.hidden_helper) {
            kmp_int32 encountering_gtid =
                next_taskdata->td_alloc_thread->th.th_info.ds.ds_gtid;
            kmp_int32 encountering_tid = __kmp_tid_from_gtid(encountering_gtid);
            __kmpc_give_task(successor->dn.task, encountering_tid);
          } else {
            __kmp_omp_task(gtid, successor->dn.task, false);
          }
        } else {
          __kmp_omp_task(gtid, successor->dn.task, false);
        }
      }
    }

    next = p->next;
    __kmp_node_deref(thread, p->node);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread, p);
#else
    __kmp_thread_free(thread, p);
#endif
  }

  __kmp_node_deref(thread, node);

  KA_TRACE(
      20,
      ("__kmp_release_deps: T#%d all successors of %p notified of completion\n",
       gtid, task));
}

#endif // KMP_TASKDEPS_H