/*
 * Copyright 2013 Ecole Normale Superieure
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege,
 * Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 */

#include <string.h>

#include <isl/space.h>
#include <isl/set.h>
#include <isl/union_set.h>

#include "gpu_tree.h"

/* The functions in this file are used to navigate part of a schedule tree
 * that is mapped to blocks. Initially, this part consists of a linear
 * branch segment with a mark node with name "kernel" on the outer end
 * and a mark node with name "thread" on the inner end.
 * During the mapping to blocks, branching may be introduced, but only
 * one of the elements in each sequence contains the "thread" mark.
 * The filter of this element (and only this filter) contains
 * domain elements identified by the "core" argument of the functions
 * that move down this tree.
 *
 * Synchronization statements have a name that starts with "sync" and
 * a user pointer pointing to the kernel that contains the synchronization.
 * The functions inserting or detecting synchronizations take a ppcg_kernel
 * argument to be able to create or identify such statements.
 * They may also use two fields in this structure, the "core" field
 * to move around in the tree and the "n_sync" field to make sure that
 * each synchronization has a different name (within the kernel).
 */

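/* Illustrative sketch (not taken from any particular input): after
 * branching has been introduced, the part of the tree navigated by
 * these functions may look as follows, where only the second filter
 * intersects the "core" domain elements and therefore has the
 * "thread" mark as a descendant:
 *
 *	mark: "kernel"
 *	  band
 *	    sequence
 *	      filter		(other statements, e.g., synchronizations)
 *	      filter		("core" branch)
 *	        band
 *	          mark: "thread"
 *	      filter		(other statements)
 */
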
/* Is "node" a mark node with an identifier called "name"?
 */
static int is_marked(__isl_keep isl_schedule_node *node, const char *name)
{
	isl_id *mark;
	int has_name;

	if (!node)
		return -1;

	if (isl_schedule_node_get_type(node) != isl_schedule_node_mark)
		return 0;

	mark = isl_schedule_node_mark_get_id(node);
	if (!mark)
		return -1;

	has_name = !strcmp(isl_id_get_name(mark), name);
	isl_id_free(mark);

	return has_name;
}

/* Is "node" a mark node with an identifier called "kernel"?
 */
int gpu_tree_node_is_kernel(__isl_keep isl_schedule_node *node)
{
	return is_marked(node, "kernel");
}

/* Is "node" a mark node with an identifier called "shared"?
 */
static int node_is_shared(__isl_keep isl_schedule_node *node)
{
	return is_marked(node, "shared");
}

/* Is "node" a mark node with an identifier called "thread"?
 */
static int node_is_thread(__isl_keep isl_schedule_node *node)
{
	return is_marked(node, "thread");
}

/* Insert a mark node with identifier "shared" in front of "node".
 */
static __isl_give isl_schedule_node *insert_shared(
	__isl_take isl_schedule_node *node)
{
	isl_ctx *ctx;
	isl_id *id;

	ctx = isl_schedule_node_get_ctx(node);
	id = isl_id_alloc(ctx, "shared", NULL);
	node = isl_schedule_node_insert_mark(node, id);

	return node;
}

/* Insert a "shared" mark in front of the "thread" mark
 * provided the linear branch between "node" and the "thread" mark
 * does not contain such a "shared" mark already.
 *
 * As a side effect, this function checks that the subtree at "node"
 * actually contains a "thread" mark and that there is no branching
 * in between "node" and this "thread" mark.
 */
__isl_give isl_schedule_node *gpu_tree_insert_shared_before_thread(
	__isl_take isl_schedule_node *node)
{
	int depth0, depth;
	int any_shared = 0;

	if (!node)
		return NULL;

	depth0 = isl_schedule_node_get_tree_depth(node);

	for (;;) {
		int is_thread;
		int n;

		if (!any_shared) {
			any_shared = node_is_shared(node);
			if (any_shared < 0)
				return isl_schedule_node_free(node);
		}
		is_thread = node_is_thread(node);
		if (is_thread < 0)
			return isl_schedule_node_free(node);
		if (is_thread)
			break;
		n = isl_schedule_node_n_children(node);
		if (n == 0)
			isl_die(isl_schedule_node_get_ctx(node),
				isl_error_invalid,
				"no thread marker found",
				return isl_schedule_node_free(node));
		if (n > 1)
			isl_die(isl_schedule_node_get_ctx(node),
				isl_error_invalid,
				"expecting single thread marker",
				return isl_schedule_node_free(node));

		node = isl_schedule_node_child(node, 0);
	}

	if (!any_shared)
		node = insert_shared(node);
	depth = isl_schedule_node_get_tree_depth(node);
	node = isl_schedule_node_ancestor(node, depth - depth0);

	return node;
}

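/* For example (illustrative), applying this function to a branch
 *
 *	band
 *	  band
 *	    mark: "thread"
 *
 * yields
 *
 *	band
 *	  band
 *	    mark: "shared"
 *	      mark: "thread"
 *
 * while a branch that already carries a "shared" mark is returned
 * unchanged.
 */
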
/* Assuming "node" is a filter node, does it correspond to the branch
 * that contains the "thread" mark, i.e., does it contain any elements
 * in "core"?
 */
static int node_is_core(__isl_keep isl_schedule_node *node,
	__isl_keep isl_union_set *core)
{
	int disjoint;
	isl_union_set *filter;

	filter = isl_schedule_node_filter_get_filter(node);
	disjoint = isl_union_set_is_disjoint(filter, core);
	isl_union_set_free(filter);
	if (disjoint < 0)
		return -1;

	return !disjoint;
}

/* Move to the only child of "node" that has the "thread" mark as descendant,
 * where the branch containing this mark is identified by the domain elements
 * in "core".
 *
 * If "node" is not a sequence, then it only has one child and we move
 * to that single child.
 * Otherwise, we check each of the filters in the children, pick
 * the one that corresponds to "core" and return a pointer to the child
 * of the filter node.
 */
static __isl_give isl_schedule_node *core_child(
	__isl_take isl_schedule_node *node, __isl_keep isl_union_set *core)
{
	int i, n;

	if (isl_schedule_node_get_type(node) != isl_schedule_node_sequence)
		return isl_schedule_node_child(node, 0);

	n = isl_schedule_node_n_children(node);
	for (i = 0; i < n; ++i) {
		int is_core;

		node = isl_schedule_node_child(node, i);
		is_core = node_is_core(node, core);

		if (is_core < 0)
			return isl_schedule_node_free(node);
		if (is_core)
			return isl_schedule_node_child(node, 0);

		node = isl_schedule_node_parent(node);
	}

	isl_die(isl_schedule_node_get_ctx(node), isl_error_internal,
		"core child not found", return isl_schedule_node_free(node));
}

/* Move down the branch between "kernel" and "thread" until
 * the "shared" mark is reached, where the branch containing the "shared"
 * mark is identified by the domain elements in "core".
 */
__isl_give isl_schedule_node *gpu_tree_move_down_to_shared(
	__isl_take isl_schedule_node *node, __isl_keep isl_union_set *core)
{
	int is_shared;

	while ((is_shared = node_is_shared(node)) == 0)
		node = core_child(node, core);
	if (is_shared < 0)
		node = isl_schedule_node_free(node);

	return node;
}

/* Move down the branch between "kernel" and "thread" until
 * the "thread" mark is reached, where the branch containing the "thread"
 * mark is identified by the domain elements in "core".
 */
__isl_give isl_schedule_node *gpu_tree_move_down_to_thread(
	__isl_take isl_schedule_node *node, __isl_keep isl_union_set *core)
{
	int is_thread;

	while ((is_thread = node_is_thread(node)) == 0)
		node = core_child(node, core);
	if (is_thread < 0)
		node = isl_schedule_node_free(node);

	return node;
}

/* Move up the tree underneath the "thread" mark until
 * the "thread" mark is reached.
 */
__isl_give isl_schedule_node *gpu_tree_move_up_to_thread(
	__isl_take isl_schedule_node *node)
{
	int is_thread;

	while ((is_thread = node_is_thread(node)) == 0)
		node = isl_schedule_node_parent(node);
	if (is_thread < 0)
		node = isl_schedule_node_free(node);

	return node;
}

/* Move up the tree underneath the "kernel" mark until
 * the "kernel" mark is reached.
 */
__isl_give isl_schedule_node *gpu_tree_move_up_to_kernel(
	__isl_take isl_schedule_node *node)
{
	int is_kernel;

	while ((is_kernel = gpu_tree_node_is_kernel(node)) == 0)
		node = isl_schedule_node_parent(node);
	if (is_kernel < 0)
		node = isl_schedule_node_free(node);

	return node;
}

/* Move down from the "kernel" mark (or at least a node with schedule
 * depth smaller than or equal to "depth") to a band node at schedule
 * depth "depth". The "thread" mark is assumed to have a schedule
 * depth greater than or equal to "depth". The branch containing the
 * "thread" mark is identified by the domain elements in "core".
 *
 * If the desired schedule depth is in the middle of a band node,
 * then the band node is split into two pieces, the second piece
 * starting at the desired schedule depth.
 */
__isl_give isl_schedule_node *gpu_tree_move_down_to_depth(
	__isl_take isl_schedule_node *node, int depth,
	__isl_keep isl_union_set *core)
{
	int is_shared;
	int is_thread = 0;

	while (node && isl_schedule_node_get_schedule_depth(node) < depth) {
		if (isl_schedule_node_get_type(node) ==
						isl_schedule_node_band) {
			int node_depth, node_dim;
			node_depth = isl_schedule_node_get_schedule_depth(node);
			node_dim = isl_schedule_node_band_n_member(node);
			if (node_depth + node_dim > depth)
				node = isl_schedule_node_band_split(node,
							depth - node_depth);
		}
		node = core_child(node, core);
	}
	while ((is_shared = node_is_shared(node)) == 0 &&
	    (is_thread = node_is_thread(node)) == 0 &&
	    isl_schedule_node_get_type(node) != isl_schedule_node_band)
		node = core_child(node, core);
	if (is_shared < 0 || is_thread < 0)
		node = isl_schedule_node_free(node);

	return node;
}

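/* For example (illustrative), if "node" is a band at schedule depth 0
 * with three members and "depth" is 2, then the band is first split as
 *
 *	band: [i, j, k]  ->  band: [i, j]
 *	                       band: [k]
 *
 * after which the traversal moves down to the inner band, which starts
 * at schedule depth 2.
 */
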
/* Create a union set containing a single set with a tuple identifier
 * called "syncX" and user pointer equal to "kernel".
 */
static __isl_give isl_union_set *create_sync_domain(struct ppcg_kernel *kernel)
{
	isl_space *space;
	isl_id *id;
	char name[40];

	space = isl_space_set_alloc(kernel->ctx, 0, 0);
	snprintf(name, sizeof(name), "sync%d", kernel->n_sync++);
	id = isl_id_alloc(kernel->ctx, name, kernel);
	space = isl_space_set_tuple_id(space, isl_dim_set, id);
	return isl_union_set_from_set(isl_set_universe(space));
}

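/* For the first synchronization in a kernel (kernel->n_sync == 0),
 * the result is simply the union set { sync0[] }, the universe of a
 * zero-dimensional space whose tuple identifier carries the kernel
 * pointer as its user field.
 */
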
/* Is "id" the identifier of a synchronization statement inside "kernel"?
 * That is, does its name start with "sync" and does it point to "kernel"?
 */
int gpu_tree_id_is_sync(__isl_keep isl_id *id, struct ppcg_kernel *kernel)
{
	const char *name;

	name = isl_id_get_name(id);
	if (!name)
		return 0;
	else if (strncmp(name, "sync", 4))
		return 0;
	return isl_id_get_user(id) == kernel;
}

/* Does "domain" consist of a single set with a tuple identifier
 * corresponding to a synchronization for "kernel"?
 */
static int domain_is_sync(__isl_keep isl_union_set *domain,
	struct ppcg_kernel *kernel)
{
	int is_sync;
	isl_id *id;
	isl_set *set;

	if (isl_union_set_n_set(domain) != 1)
		return 0;
	set = isl_set_from_union_set(isl_union_set_copy(domain));
	id = isl_set_get_tuple_id(set);
	is_sync = gpu_tree_id_is_sync(id, kernel);
	isl_id_free(id);
	isl_set_free(set);

	return is_sync;
}

/* Does "node" point to a filter selecting a synchronization statement
 * for "kernel"?
 */
static int node_is_sync_filter(__isl_keep isl_schedule_node *node,
	struct ppcg_kernel *kernel)
{
	int is_sync;
	enum isl_schedule_node_type type;
	isl_union_set *domain;

	if (!node)
		return -1;
	type = isl_schedule_node_get_type(node);
	if (type != isl_schedule_node_filter)
		return 0;
	domain = isl_schedule_node_filter_get_filter(node);
	is_sync = domain_is_sync(domain, kernel);
	isl_union_set_free(domain);

	return is_sync;
}

/* Is "node" part of a sequence with a previous synchronization statement
 * for "kernel"?
 * That is, is the parent of "node" a filter such that there is
 * a previous filter that picks out exactly such a synchronization statement?
 */
static int has_preceding_sync(__isl_keep isl_schedule_node *node,
	struct ppcg_kernel *kernel)
{
	int found = 0;

	node = isl_schedule_node_copy(node);
	node = isl_schedule_node_parent(node);
	while (!found && isl_schedule_node_has_previous_sibling(node)) {
		node = isl_schedule_node_previous_sibling(node);
		if (!node)
			break;
		found = node_is_sync_filter(node, kernel);
	}
	if (!node)
		found = -1;
	isl_schedule_node_free(node);

	return found;
}

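/* Illustrative example: if the parent of "node" sits in a sequence of
 * the form
 *
 *	sequence
 *	  filter: { sync0[] }
 *	  filter: { S[i] }	(parent of "node")
 *
 * then has_preceding_sync() returns 1 because the first filter selects
 * exactly a synchronization statement for "kernel".
 */
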
/* Is "node" part of a sequence with a subsequent synchronization statement
 * for "kernel"?
 * That is, is the parent of "node" a filter such that there is
 * a subsequent filter that picks out exactly such a synchronization statement?
 */
static int has_following_sync(__isl_keep isl_schedule_node *node,
	struct ppcg_kernel *kernel)
{
	int found = 0;

	node = isl_schedule_node_copy(node);
	node = isl_schedule_node_parent(node);
	while (!found && isl_schedule_node_has_next_sibling(node)) {
		node = isl_schedule_node_next_sibling(node);
		if (!node)
			break;
		found = node_is_sync_filter(node, kernel);
	}
	if (!node)
		found = -1;
	isl_schedule_node_free(node);

	return found;
}

/* Does the subtree rooted at "node" (which is a band node) contain
 * any synchronization statement for "kernel" that precedes
 * the core computation of "kernel" (identified by the elements
 * in kernel->core)?
 */
static int has_sync_before_core(__isl_keep isl_schedule_node *node,
	struct ppcg_kernel *kernel)
{
	int has_sync = 0;
	int is_thread;

	node = isl_schedule_node_copy(node);
	while ((is_thread = node_is_thread(node)) == 0) {
		node = core_child(node, kernel->core);
		has_sync = has_preceding_sync(node, kernel);
		if (has_sync < 0 || has_sync)
			break;
	}
	if (is_thread < 0 || !node)
		has_sync = -1;
	isl_schedule_node_free(node);

	return has_sync;
}

/* Does the subtree rooted at "node" (which is a band node) contain
 * any synchronization statement for "kernel" that follows
 * the core computation of "kernel" (identified by the elements
 * in kernel->core)?
 */
static int has_sync_after_core(__isl_keep isl_schedule_node *node,
	struct ppcg_kernel *kernel)
{
	int has_sync = 0;
	int is_thread;

	node = isl_schedule_node_copy(node);
	while ((is_thread = node_is_thread(node)) == 0) {
		node = core_child(node, kernel->core);
		has_sync = has_following_sync(node, kernel);
		if (has_sync < 0 || has_sync)
			break;
	}
	if (is_thread < 0 || !node)
		has_sync = -1;
	isl_schedule_node_free(node);

	return has_sync;
}

/* Insert (or extend) an extension on top of "node" that puts
 * a synchronization node for "kernel" before "node".
 * Return a pointer to the original node in the updated schedule tree.
 */
static __isl_give isl_schedule_node *insert_sync_before(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	isl_union_set *domain;
	isl_schedule_node *graft;

	if (!node)
		return NULL;

	domain = create_sync_domain(kernel);
	graft = isl_schedule_node_from_domain(domain);
	node = isl_schedule_node_graft_before(node, graft);

	return node;
}

/* Insert (or extend) an extension on top of "node" that puts
 * a synchronization node for "kernel" after "node".
 * Return a pointer to the original node in the updated schedule tree.
 */
static __isl_give isl_schedule_node *insert_sync_after(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	isl_union_set *domain;
	isl_schedule_node *graft;

	if (!node)
		return NULL;

	domain = create_sync_domain(kernel);
	graft = isl_schedule_node_from_domain(domain);
	node = isl_schedule_node_graft_after(node, graft);

	return node;
}

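/* Illustrative sketch of the effect (assuming schedule depth zero):
 * grafting a fresh { sync1[] } domain after "node" results in a tree
 * fragment of the form
 *
 *	extension: { [] -> sync1[] }
 *	  sequence
 *	    filter		(original subtree at "node")
 *	    filter: { sync1[] }
 *
 * with the returned pointer referring to the original subtree inside
 * the sequence.
 */
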
/* Insert an extension on top of "node" that puts a synchronization node
 * for "kernel" before "node" unless there already is
 * such a synchronization node.
 */
__isl_give isl_schedule_node *gpu_tree_ensure_preceding_sync(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	int has_sync;

	has_sync = has_preceding_sync(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	return insert_sync_before(node, kernel);
}

/* Insert an extension on top of "node" that puts a synchronization node
 * for "kernel" after "node" unless there already is
 * such a synchronization node.
 */
__isl_give isl_schedule_node *gpu_tree_ensure_following_sync(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	int has_sync;

	has_sync = has_following_sync(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	return insert_sync_after(node, kernel);
}

/* Insert an extension on top of "node" that puts a synchronization node
 * for "kernel" after "node" unless there already is such a sync node or
 * "node" itself already contains a synchronization node following
 * the core computation of "kernel".
 */
__isl_give isl_schedule_node *gpu_tree_ensure_sync_after_core(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	int has_sync;

	has_sync = has_sync_after_core(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	has_sync = has_following_sync(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	return insert_sync_after(node, kernel);
}

/* Move left in the sequence on top of "node" to a synchronization node
 * for "kernel".
 * If "node" itself contains a synchronization node preceding
 * the core computation of "kernel", then return "node" itself.
 * Otherwise, if "node" does not have a preceding synchronization node,
 * then create one first.
 */
__isl_give isl_schedule_node *gpu_tree_move_left_to_sync(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	int has_sync;
	int is_sync;

	has_sync = has_sync_before_core(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	node = gpu_tree_ensure_preceding_sync(node, kernel);
	node = isl_schedule_node_parent(node);
	while ((is_sync = node_is_sync_filter(node, kernel)) == 0)
		node = isl_schedule_node_previous_sibling(node);
	if (is_sync < 0)
		node = isl_schedule_node_free(node);
	node = isl_schedule_node_child(node, 0);

	return node;
}

/* Move right in the sequence on top of "node" to a synchronization node
 * for "kernel".
 * If "node" itself contains a synchronization node following
 * the core computation of "kernel", then return "node" itself.
 * Otherwise, if "node" does not have a following synchronization node,
 * then create one first.
 */
__isl_give isl_schedule_node *gpu_tree_move_right_to_sync(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	int has_sync;
	int is_sync;

	has_sync = has_sync_after_core(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	node = gpu_tree_ensure_following_sync(node, kernel);
	node = isl_schedule_node_parent(node);
	while ((is_sync = node_is_sync_filter(node, kernel)) == 0)
		node = isl_schedule_node_next_sibling(node);
	if (is_sync < 0)
		node = isl_schedule_node_free(node);
	node = isl_schedule_node_child(node, 0);

	return node;
}

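/* Typical usage sketch (illustrative; the actual callers live elsewhere
 * in ppcg): a transformation that needs a barrier after the core
 * computation could write
 *
 *	node = gpu_tree_move_right_to_sync(node, kernel);
 *	... insert code before the synchronization ...
 *	node = gpu_tree_move_up_to_kernel(node);
 *
 * relying on gpu_tree_move_right_to_sync() to create the synchronization
 * statement if none is present yet.
 */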