/*
 * Copyright 2013      Ecole Normale Superieure
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege,
 * Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 */

#include <string.h>

#include <isl/set.h>
#include <isl/union_set.h>

#include "gpu_tree.h"

/* The functions in this file are used to navigate part of a schedule tree
 * that is mapped to blocks.  Initially, this part consists of a linear
 * branch segment with a mark node with name "kernel" on the outer end
 * and a mark node with name "thread" on the inner end.
 * During the mapping to blocks, branching may be introduced, but only
 * one of the elements in each sequence contains the "thread" mark.
 * The filter of this element (and only this filter) contains
 * domain elements identified by the "core" argument of the functions
 * that move down this tree.
 *
 * Synchronization statements have a name that starts with "sync" and
 * a user pointer pointing to the kernel that contains the synchronization.
 * The functions inserting or detecting synchronizations take a ppcg_kernel
 * argument to be able to create or identify such statements.
 * They may also use two fields in this structure, the "core" field
 * to move around in the tree and the "n_sync" field to make sure that
 * each synchronization has a different name (within the kernel).
 */

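/* For illustration (a hand-drawn sketch, not generated output),
 * the initial shape of this part of the tree is something like
 *
 *	mark: "kernel"
 *	  band
 *	  ...
 *	    band
 *	      mark: "thread"
 *
 * After the mapping to blocks, sequence nodes may appear in this branch,
 * with exactly one filter child on the path to the "thread" mark.
 */
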
/* Is "node" a mark node with an identifier called "name"?
 */
static int is_marked(__isl_keep isl_schedule_node *node, const char *name)
{
	isl_id *mark;
	int has_name;

	if (!node)
		return -1;

	if (isl_schedule_node_get_type(node) != isl_schedule_node_mark)
		return 0;

	mark = isl_schedule_node_mark_get_id(node);
	if (!mark)
		return -1;

	has_name = !strcmp(isl_id_get_name(mark), name);
	isl_id_free(mark);

	return has_name;
}

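/* Note that, following the usual isl convention, the predicates in this
 * file return 1 for true, 0 for false and a negative value on error,
 * so callers check for a negative result before using the answer.
 */
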
/* Is "node" a mark node with an identifier called "kernel"?
 */
int gpu_tree_node_is_kernel(__isl_keep isl_schedule_node *node)
{
	return is_marked(node, "kernel");
}

/* Is "node" a mark node with an identifier called "shared"?
 */
static int node_is_shared(__isl_keep isl_schedule_node *node)
{
	return is_marked(node, "shared");
}

/* Is "node" a mark node with an identifier called "thread"?
 */
static int node_is_thread(__isl_keep isl_schedule_node *node)
{
	return is_marked(node, "thread");
}

/* Insert a mark node with identifier "shared" in front of "node".
 */
static __isl_give isl_schedule_node *insert_shared(
	__isl_take isl_schedule_node *node)
{
	isl_ctx *ctx;
	isl_id *id;

	ctx = isl_schedule_node_get_ctx(node);
	id = isl_id_alloc(ctx, "shared", NULL);
	node = isl_schedule_node_insert_mark(node, id);

	return node;
}

/* Insert a "shared" mark in front of the "thread" mark
 * provided the linear branch between "node" and the "thread" mark
 * does not contain such a "shared" mark already.
 *
 * As a side effect, this function checks that the subtree at "node"
 * actually contains a "thread" mark and that there is no branching
 * in between "node" and this "thread" mark.
 */
__isl_give isl_schedule_node *gpu_tree_insert_shared_before_thread(
	__isl_take isl_schedule_node *node)
{
	int depth0, depth;
	int any_shared = 0;

	if (!node)
		return NULL;

	depth0 = isl_schedule_node_get_tree_depth(node);

	for (;;) {
		int is_thread;
		int n;

		if (!any_shared) {
			any_shared = node_is_shared(node);
			if (any_shared < 0)
				return isl_schedule_node_free(node);
		}
		is_thread = node_is_thread(node);
		if (is_thread < 0)
			return isl_schedule_node_free(node);
		if (is_thread)
			break;

		n = isl_schedule_node_n_children(node);
		if (n == 0)
			isl_die(isl_schedule_node_get_ctx(node),
				isl_error_invalid,
				"no thread marker found",
				return isl_schedule_node_free(node));
		if (n > 1)
			isl_die(isl_schedule_node_get_ctx(node),
				isl_error_invalid,
				"expecting single thread marker",
				return isl_schedule_node_free(node));

		node = isl_schedule_node_child(node, 0);
	}

	if (!any_shared)
		node = insert_shared(node);
	depth = isl_schedule_node_get_tree_depth(node);
	node = isl_schedule_node_ancestor(node, depth - depth0);

	return node;
}

/* Assuming "node" is a filter node, does it correspond to the branch
 * that contains the "thread" mark, i.e., does it contain any elements
 * in "core"?
 */
static int node_is_core(__isl_keep isl_schedule_node *node,
	__isl_keep isl_union_set *core)
{
	int disjoint;
	isl_union_set *filter;

	filter = isl_schedule_node_filter_get_filter(node);
	disjoint = isl_union_set_is_disjoint(filter, core);
	isl_union_set_free(filter);
	if (disjoint < 0)
		return -1;

	return !disjoint;
}

/* Move to the only child of "node" that has the "thread" mark as descendant,
 * where the branch containing this mark is identified by the domain elements
 * in "core".
 *
 * If "node" is not a sequence, then it only has one child and we move
 * to that single child.
 * Otherwise, we check each of the filters in the children, pick
 * the one that corresponds to "core" and return a pointer to the child
 * of the filter node.
 */
static __isl_give isl_schedule_node *core_child(
	__isl_take isl_schedule_node *node, __isl_keep isl_union_set *core)
{
	int i, n;

	if (isl_schedule_node_get_type(node) != isl_schedule_node_sequence)
		return isl_schedule_node_child(node, 0);

	n = isl_schedule_node_n_children(node);
	for (i = 0; i < n; ++i) {
		int is_core;

		node = isl_schedule_node_child(node, i);
		is_core = node_is_core(node, core);

		if (is_core < 0)
			return isl_schedule_node_free(node);
		if (is_core)
			return isl_schedule_node_child(node, 0);

		node = isl_schedule_node_parent(node);
	}

	isl_die(isl_schedule_node_get_ctx(node), isl_error_internal,
		"core child not found", return isl_schedule_node_free(node));
}

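/* For example (schematically), at a sequence node of the form
 *
 *	sequence
 *	  filter: F1
 *	  filter: F2	(overlaps "core")
 *	  filter: F3
 *
 * core_child() moves to the child of the second filter, since F2 is
 * the only filter that is not disjoint from "core".
 */
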
/* Move down the branch between "kernel" and "thread" until
 * the "shared" mark is reached, where the branch containing the "shared"
 * mark is identified by the domain elements in "core".
 */
__isl_give isl_schedule_node *gpu_tree_move_down_to_shared(
	__isl_take isl_schedule_node *node, __isl_keep isl_union_set *core)
{
	int is_shared;

	while ((is_shared = node_is_shared(node)) == 0)
		node = core_child(node, core);
	if (is_shared < 0)
		node = isl_schedule_node_free(node);

	return node;
}

/* Move down the branch between "kernel" and "thread" until
 * the "thread" mark is reached, where the branch containing the "thread"
 * mark is identified by the domain elements in "core".
 */
__isl_give isl_schedule_node *gpu_tree_move_down_to_thread(
	__isl_take isl_schedule_node *node, __isl_keep isl_union_set *core)
{
	int is_thread;

	while ((is_thread = node_is_thread(node)) == 0)
		node = core_child(node, core);
	if (is_thread < 0)
		node = isl_schedule_node_free(node);

	return node;
}

/* Move up the tree underneath the "thread" mark until
 * the "thread" mark is reached.
 */
__isl_give isl_schedule_node *gpu_tree_move_up_to_thread(
	__isl_take isl_schedule_node *node)
{
	int is_thread;

	while ((is_thread = node_is_thread(node)) == 0)
		node = isl_schedule_node_parent(node);
	if (is_thread < 0)
		node = isl_schedule_node_free(node);

	return node;
}

/* Move up the tree underneath the "kernel" mark until
 * the "kernel" mark is reached.
 */
__isl_give isl_schedule_node *gpu_tree_move_up_to_kernel(
	__isl_take isl_schedule_node *node)
{
	int is_kernel;

	while ((is_kernel = gpu_tree_node_is_kernel(node)) == 0)
		node = isl_schedule_node_parent(node);
	if (is_kernel < 0)
		node = isl_schedule_node_free(node);

	return node;
}

/* Move down from the "kernel" mark (or at least a node with schedule
 * depth smaller than or equal to "depth") to a band node at schedule
 * depth "depth".  The "thread" mark is assumed to have a schedule
 * depth greater than or equal to "depth".  The branch containing the
 * "thread" mark is identified by the domain elements in "core".
 *
 * If the desired schedule depth is in the middle of a band node,
 * then the band node is split into two pieces, the second piece
 * at the desired schedule depth.
 */
__isl_give isl_schedule_node *gpu_tree_move_down_to_depth(
	__isl_take isl_schedule_node *node, int depth,
	__isl_keep isl_union_set *core)
{
	int is_shared;
	int is_thread;

	while (node && isl_schedule_node_get_schedule_depth(node) < depth) {
		if (isl_schedule_node_get_type(node) ==
						isl_schedule_node_band) {
			int node_depth, node_dim;
			node_depth = isl_schedule_node_get_schedule_depth(node);
			node_dim = isl_schedule_node_band_n_member(node);
			if (node_depth + node_dim > depth)
				node = isl_schedule_node_band_split(node,
							depth - node_depth);
		}
		node = core_child(node, core);
	}
	while ((is_shared = node_is_shared(node)) == 0 &&
	    (is_thread = node_is_thread(node)) == 0 &&
	    isl_schedule_node_get_type(node) != isl_schedule_node_band)
		node = core_child(node, core);
	if (is_shared < 0 || is_thread < 0)
		node = isl_schedule_node_free(node);

	return node;
}

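/* As a concrete (hypothetical) example of the splitting above: if "node"
 * is a band with three members at schedule depth 1 and "depth" is 2,
 * then the band is split after its first member, leaving an outer band
 * that ends at schedule depth 2 and an inner band with the remaining
 * two members.
 */
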
/* Create a union set containing a single set with a tuple identifier
 * called "syncX" and user pointer equal to "kernel".
 */
static __isl_give isl_union_set *create_sync_domain(struct ppcg_kernel *kernel)
{
	isl_space *space;
	isl_id *id;
	char name[40];

	space = isl_space_set_alloc(kernel->ctx, 0, 0);
	snprintf(name, sizeof(name), "sync%d", kernel->n_sync++);
	id = isl_id_alloc(kernel->ctx, name, kernel);
	space = isl_space_set_tuple_id(space, isl_dim_set, id);
	return isl_union_set_from_set(isl_set_universe(space));
}

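/* For instance, if kernel->n_sync is currently 2, then the function above
 * creates the zero-dimensional domain { sync2[] } (with "kernel" attached
 * as the user pointer of the tuple identifier) and bumps n_sync to 3.
 */
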
/* Is "id" the identifier of a synchronization statement inside "kernel"?
 * That is, does its name start with "sync" and does it point to "kernel"?
 */
int gpu_tree_id_is_sync(__isl_keep isl_id *id, struct ppcg_kernel *kernel)
{
	const char *name;

	name = isl_id_get_name(id);
	if (!name)
		return 0;
	else if (strncmp(name, "sync", 4))
		return 0;
	return isl_id_get_user(id) == kernel;
}

/* Does "domain" consist of a single set with a tuple identifier
 * corresponding to a synchronization for "kernel"?
 */
static int domain_is_sync(__isl_keep isl_union_set *domain,
	struct ppcg_kernel *kernel)
{
	int is_sync;
	isl_id *id;
	isl_set *set;

	if (isl_union_set_n_set(domain) != 1)
		return 0;
	set = isl_set_from_union_set(isl_union_set_copy(domain));
	id = isl_set_get_tuple_id(set);
	is_sync = gpu_tree_id_is_sync(id, kernel);
	isl_id_free(id);
	isl_set_free(set);

	return is_sync;
}

/* Does "node" point to a filter selecting a synchronization statement
 * for "kernel"?
 */
static int node_is_sync_filter(__isl_keep isl_schedule_node *node,
	struct ppcg_kernel *kernel)
{
	int is_sync;
	enum isl_schedule_node_type type;
	isl_union_set *domain;

	if (!node)
		return -1;
	type = isl_schedule_node_get_type(node);
	if (type != isl_schedule_node_filter)
		return 0;
	domain = isl_schedule_node_filter_get_filter(node);
	is_sync = domain_is_sync(domain, kernel);
	isl_union_set_free(domain);

	return is_sync;
}

/* Is "node" part of a sequence with a previous synchronization statement
 * for "kernel"?
 * That is, is the parent of "node" a filter such that there is
 * a previous filter that picks out exactly such a synchronization statement?
 */
static int has_preceding_sync(__isl_keep isl_schedule_node *node,
	struct ppcg_kernel *kernel)
{
	int found = 0;

	node = isl_schedule_node_copy(node);
	node = isl_schedule_node_parent(node);
	while (!found && isl_schedule_node_has_previous_sibling(node)) {
		node = isl_schedule_node_previous_sibling(node);
		if (!node)
			break;
		found = node_is_sync_filter(node, kernel);
	}
	if (!node)
		found = -1;
	isl_schedule_node_free(node);

	return found;
}

/* Is "node" part of a sequence with a subsequent synchronization statement
 * for "kernel"?
 * That is, is the parent of "node" a filter such that there is
 * a subsequent filter that picks out exactly such a synchronization statement?
 */
static int has_following_sync(__isl_keep isl_schedule_node *node,
	struct ppcg_kernel *kernel)
{
	int found = 0;

	node = isl_schedule_node_copy(node);
	node = isl_schedule_node_parent(node);
	while (!found && isl_schedule_node_has_next_sibling(node)) {
		node = isl_schedule_node_next_sibling(node);
		if (!node)
			break;
		found = node_is_sync_filter(node, kernel);
	}
	if (!node)
		found = -1;
	isl_schedule_node_free(node);

	return found;
}

/* Does the subtree rooted at "node" (which is a band node) contain
 * any synchronization statement for "kernel" that precedes
 * the core computation of "kernel" (identified by the elements
 * in kernel->core)?
 */
static int has_sync_before_core(__isl_keep isl_schedule_node *node,
	struct ppcg_kernel *kernel)
{
	int has_sync = 0;
	int is_thread;

	node = isl_schedule_node_copy(node);
	while ((is_thread = node_is_thread(node)) == 0) {
		node = core_child(node, kernel->core);
		has_sync = has_preceding_sync(node, kernel);
		if (has_sync < 0 || has_sync)
			break;
	}
	if (is_thread < 0 || !node)
		has_sync = -1;
	isl_schedule_node_free(node);

	return has_sync;
}

/* Does the subtree rooted at "node" (which is a band node) contain
 * any synchronization statement for "kernel" that follows
 * the core computation of "kernel" (identified by the elements
 * in kernel->core)?
 */
static int has_sync_after_core(__isl_keep isl_schedule_node *node,
	struct ppcg_kernel *kernel)
{
	int has_sync = 0;
	int is_thread;

	node = isl_schedule_node_copy(node);
	while ((is_thread = node_is_thread(node)) == 0) {
		node = core_child(node, kernel->core);
		has_sync = has_following_sync(node, kernel);
		if (has_sync < 0 || has_sync)
			break;
	}
	if (is_thread < 0 || !node)
		has_sync = -1;
	isl_schedule_node_free(node);

	return has_sync;
}

/* Insert (or extend) an extension on top of "node" that puts
 * a synchronization node for "kernel" before "node".
 * Return a pointer to the original node in the updated schedule tree.
 */
static __isl_give isl_schedule_node *insert_sync_before(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	isl_union_set *domain;
	isl_schedule_node *graft;

	if (!node)
		return NULL;

	domain = create_sync_domain(kernel);
	graft = isl_schedule_node_from_domain(domain);
	node = isl_schedule_node_graft_before(node, graft);

	return node;
}

/* Insert (or extend) an extension on top of "node" that puts
 * a synchronization node for "kernel" after "node".
 * Return a pointer to the original node in the updated schedule tree.
 */
static __isl_give isl_schedule_node *insert_sync_after(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	isl_union_set *domain;
	isl_schedule_node *graft;

	if (!node)
		return NULL;

	domain = create_sync_domain(kernel);
	graft = isl_schedule_node_from_domain(domain);
	node = isl_schedule_node_graft_after(node, graft);

	return node;
}

/* Insert an extension on top of "node" that puts a synchronization node
 * for "kernel" before "node" unless there already is
 * such a synchronization node.
 */
__isl_give isl_schedule_node *gpu_tree_ensure_preceding_sync(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	int has_sync;

	has_sync = has_preceding_sync(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	return insert_sync_before(node, kernel);
}

/* Insert an extension on top of "node" that puts a synchronization node
 * for "kernel" after "node" unless there already is
 * such a synchronization node.
 */
__isl_give isl_schedule_node *gpu_tree_ensure_following_sync(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	int has_sync;

	has_sync = has_following_sync(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	return insert_sync_after(node, kernel);
}

/* Insert an extension on top of "node" that puts a synchronization node
 * for "kernel" after "node" unless there already is such a sync node or
 * unless "node" itself already contains a synchronization node following
 * the core computation of "kernel".
 */
__isl_give isl_schedule_node *gpu_tree_ensure_sync_after_core(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	int has_sync;

	has_sync = has_sync_after_core(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	has_sync = has_following_sync(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	return insert_sync_after(node, kernel);
}

/* Move left in the sequence on top of "node" to a synchronization node
 * for "kernel".
 * If "node" itself contains a synchronization node preceding
 * the core computation of "kernel", then return "node" itself.
 * Otherwise, if "node" does not have a preceding synchronization node,
 * then create one first.
 */
__isl_give isl_schedule_node *gpu_tree_move_left_to_sync(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	int has_sync;
	int is_sync;

	has_sync = has_sync_before_core(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	node = gpu_tree_ensure_preceding_sync(node, kernel);
	node = isl_schedule_node_parent(node);
	while ((is_sync = node_is_sync_filter(node, kernel)) == 0)
		node = isl_schedule_node_previous_sibling(node);
	if (is_sync < 0)
		node = isl_schedule_node_free(node);
	node = isl_schedule_node_child(node, 0);

	return node;
}

/* Move right in the sequence on top of "node" to a synchronization node
 * for "kernel".
 * If "node" itself contains a synchronization node following
 * the core computation of "kernel", then return "node" itself.
 * Otherwise, if "node" does not have a following synchronization node,
 * then create one first.
 */
__isl_give isl_schedule_node *gpu_tree_move_right_to_sync(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	int has_sync;
	int is_sync;

	has_sync = has_sync_after_core(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	node = gpu_tree_ensure_following_sync(node, kernel);
	node = isl_schedule_node_parent(node);
	while ((is_sync = node_is_sync_filter(node, kernel)) == 0)
		node = isl_schedule_node_next_sibling(node);
	if (is_sync < 0)
		node = isl_schedule_node_free(node);
	node = isl_schedule_node_child(node, 0);

	return node;
}