/*
 * Copyright 2012 INRIA Paris-Rocquencourt
 * Copyright 2012 Ecole Normale Superieure
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Tobias Grosser, INRIA Paris-Rocquencourt,
 * Domaine de Voluceau, Rocquencourt, B.P. 105,
 * 78153 Le Chesnay Cedex France
 * and Sven Verdoolaege,
 * Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <isl/aff.h>
#include <isl/ctx.h>
#include <isl/set.h>
#include <isl/map.h>
#include <isl/union_set.h>
#include <isl/union_map.h>
#include <isl/ast_build.h>
#include <isl/schedule.h>
#include <isl/schedule_node.h>
#include <isl/id_to_ast_expr.h>
#include <pet.h>

#include "cpu.h"
#include "ppcg.h"
#include "ppcg_options.h"
#include "print.h"
#include "util.h"
/* Representation of a statement inside a generated AST.
 *
 * "stmt" refers to the original statement.
 * "ref2expr" maps the reference identifier of each access in
 * the statement to an AST expression that should be printed
 * at the place of the access.
 */
struct ppcg_stmt {
        struct pet_stmt *stmt;

        isl_id_to_ast_expr *ref2expr;
};
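/* Free a ppcg_stmt along with the AST expressions it references.
 *
 * This function is also used as an isl_id free callback
 * (see at_each_domain below), so it takes a void pointer.
 */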
static void ppcg_stmt_free(void *user)
{
        struct ppcg_stmt *stmt = user;

        if (!stmt)
                return;

        isl_id_to_ast_expr_free(stmt->ref2expr);

        free(stmt);
}
/* Derive the output file name from the input file name.
 * 'input' is the entire path of the input file. The output
 * is the file name plus the additional extension.
 *
 * We will basically replace everything after the last point
 * with '.ppcg.c'. This means file.c becomes file.ppcg.c
 */
static FILE *get_output_file(const char *input, const char *output)
{
        char name[PATH_MAX];
        const char ppcg_marker[] = ".ppcg";
        int len;
        char *ext;
        FILE *file;

        len = ppcg_extract_base_name(name, input);

        strcpy(name + len, ppcg_marker);
        ext = strrchr(input, '.');
        strcpy(name + len + sizeof(ppcg_marker) - 1, ext ? ext : ".c");

        if (!output)
                output = name;

        file = fopen(output, "w");
        if (!file) {
                fprintf(stderr, "Unable to open '%s' for writing\n", output);
                return NULL;
        }

        return file;
}
/* Data used to annotate for nodes in the AST.
 */
struct ast_node_userinfo {
        /* The for node is an openmp parallel for node. */
        int is_openmp;
};
/* Information used while building the AST.
 */
struct ast_build_userinfo {
        /* The current ppcg scop. */
        struct ppcg_scop *scop;

        /* Are we currently in a parallel for loop? */
        int in_parallel_for;
};
/* Check if the current scheduling dimension is parallel.
 *
 * We check for parallelism by verifying that the loop does not carry any
 * dependences.
 *
 * If the live_range_reordering option is set, then this currently
 * includes the order dependences.  In principle, non-zero order dependences
 * could be allowed, but this would require privatization and/or expansion.
 *
 * Parallelism test: if the distance is zero in all outer dimensions, then it
 * has to be zero in the current dimension as well.
 * Implementation: first, translate dependences into time space, then force
 * outer dimensions to be equal.  If the distance is zero in the current
 * dimension, then the loop is parallel.
 * The distance is zero in the current dimension if it is a subset of a map
 * with equal values for the current dimension.
 */
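/* For example, for a two-dimensional schedule, a dependence with
 * distance vector (0, 1) still relates different values of the inner
 * dimension after the outer dimension has been equated, so the inner
 * loop is not parallel.  A dependence with distance vector (1, 0)
 * disappears when the outer dimension is equated, so it does not
 * prevent the inner loop from being parallel.
 */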
static int ast_schedule_dim_is_parallel(__isl_keep isl_ast_build *build,
        struct ppcg_scop *scop)
{
        isl_union_map *schedule, *deps;
        isl_map *schedule_deps, *test;
        isl_space *schedule_space;
        unsigned i, dimension, is_parallel;

        schedule = isl_ast_build_get_schedule(build);
        schedule_space = isl_ast_build_get_schedule_space(build);

        dimension = isl_space_dim(schedule_space, isl_dim_out) - 1;

        deps = isl_union_map_copy(scop->dep_flow);
        deps = isl_union_map_union(deps, isl_union_map_copy(scop->dep_false));
        if (scop->options->live_range_reordering) {
                isl_union_map *order = isl_union_map_copy(scop->dep_order);
                deps = isl_union_map_union(deps, order);
        }
        deps = isl_union_map_apply_range(deps, isl_union_map_copy(schedule));
        deps = isl_union_map_apply_domain(deps, schedule);

        if (isl_union_map_is_empty(deps)) {
                isl_union_map_free(deps);
                isl_space_free(schedule_space);
                return 1;
        }

        schedule_deps = isl_map_from_union_map(deps);

        for (i = 0; i < dimension; i++)
                schedule_deps = isl_map_equate(schedule_deps, isl_dim_out, i,
                        isl_dim_in, i);

        test = isl_map_universe(isl_map_get_space(schedule_deps));
        test = isl_map_equate(test, isl_dim_out, dimension, isl_dim_in,
                dimension);
        is_parallel = isl_map_is_subset(schedule_deps, test);

        isl_space_free(schedule_space);
        isl_map_free(test);
        isl_map_free(schedule_deps);

        return is_parallel;
}
/* Mark a for node as openmp parallel if it is the outermost parallel
 * for node.
 */
static void mark_openmp_parallel(__isl_keep isl_ast_build *build,
        struct ast_build_userinfo *build_info,
        struct ast_node_userinfo *node_info)
{
        if (build_info->in_parallel_for)
                return;

        if (ast_schedule_dim_is_parallel(build, build_info->scop)) {
                build_info->in_parallel_for = 1;
                node_info->is_openmp = 1;
        }
}
/* Allocate an ast_node_userinfo structure and initialize it with
 * default values.
 */
static struct ast_node_userinfo *allocate_ast_node_userinfo(void)
{
        struct ast_node_userinfo *node_info;

        node_info = (struct ast_node_userinfo *)
                malloc(sizeof(struct ast_node_userinfo));
        node_info->is_openmp = 0;

        return node_info;
}
/* Free an ast_node_userinfo structure.
 */
static void free_ast_node_userinfo(void *ptr)
{
        struct ast_node_userinfo *info;

        info = (struct ast_node_userinfo *) ptr;
        free(info);
}
/* This method is executed before the construction of a for node.  It creates
 * an isl_id that is used to annotate the subsequently generated ast for nodes.
 *
 * In this function we also run the following analyses:
 *
 *      - Detection of openmp parallel loops
 */
static __isl_give isl_id *ast_build_before_for(
        __isl_keep isl_ast_build *build, void *user)
{
        isl_id *id;
        struct ast_build_userinfo *build_info;
        struct ast_node_userinfo *node_info;

        build_info = (struct ast_build_userinfo *) user;
        node_info = allocate_ast_node_userinfo();
        id = isl_id_alloc(isl_ast_build_get_ctx(build), "", node_info);
        id = isl_id_set_free_user(id, free_ast_node_userinfo);

        mark_openmp_parallel(build, build_info, node_info);

        return id;
}
/* This method is executed after the construction of a for node.
 *
 * It performs the following actions:
 *
 *      - Reset the 'in_parallel_for' flag as soon as we leave a for node
 *        that is marked as openmp parallel.
 */
static __isl_give isl_ast_node *ast_build_after_for(
        __isl_take isl_ast_node *node, __isl_keep isl_ast_build *build,
        void *user)
{
        isl_id *id;
        struct ast_build_userinfo *build_info;
        struct ast_node_userinfo *info;

        id = isl_ast_node_get_annotation(node);
        info = isl_id_get_user(id);

        if (info && info->is_openmp) {
                build_info = (struct ast_build_userinfo *) user;
                build_info->in_parallel_for = 0;
        }

        isl_id_free(id);

        return node;
}
/* Find the element in scop->stmts that has the given "id".
 */
static struct pet_stmt *find_stmt(struct ppcg_scop *scop, __isl_keep isl_id *id)
{
        int i;

        for (i = 0; i < scop->pet->n_stmt; ++i) {
                struct pet_stmt *stmt = scop->pet->stmts[i];
                isl_id *id_i;

                id_i = isl_set_get_tuple_id(stmt->domain);
                isl_id_free(id_i);

                if (id_i == id)
                        return stmt;
        }

        isl_die(isl_id_get_ctx(id), isl_error_internal,
                "statement not found", return NULL);
}
/* Print a user statement in the generated AST.
 * The ppcg_stmt has been attached to the node in at_each_domain.
 */
static __isl_give isl_printer *print_user(__isl_take isl_printer *p,
        __isl_take isl_ast_print_options *print_options,
        __isl_keep isl_ast_node *node, void *user)
{
        struct ppcg_stmt *stmt;
        isl_id *id;

        id = isl_ast_node_get_annotation(node);
        stmt = isl_id_get_user(id);
        isl_id_free(id);

        p = pet_stmt_print_body(stmt->stmt, p, stmt->ref2expr);

        isl_ast_print_options_free(print_options);

        return p;
}
/* Print a for loop node as an openmp parallel loop.
 *
 * To print an openmp parallel loop we print a normal for loop, but add
 * "#pragma omp parallel for" in front.
 *
 * Variables that are declared within the body of this for loop are
 * automatically openmp 'private'.  Iterators declared outside of the
 * for loop are automatically openmp 'shared'.  As ppcg declares all iterators
 * at the position where they are assigned, there is no need to explicitly mark
 * variables.  Their automatically assigned type is already correct.
 *
 * This function only generates valid OpenMP code if the AST was generated
 * with the 'atomic-bounds' option enabled.
 */
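/* Schematically, an outermost parallel loop is printed as follows
 * (iterator and statement names depend on the input):
 *
 *      #pragma omp parallel for
 *      for (int c0 = 0; c0 < n; c0 += 1)
 *              S_0(c0);
 */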
static __isl_give isl_printer *print_for_with_openmp(
        __isl_keep isl_ast_node *node, __isl_take isl_printer *p,
        __isl_take isl_ast_print_options *print_options)
{
        p = isl_printer_start_line(p);
        p = isl_printer_print_str(p, "#pragma omp parallel for");
        p = isl_printer_end_line(p);

        p = isl_ast_node_for_print(node, p, print_options);

        return p;
}
/* Print a for node.
 *
 * Depending on how the node is annotated, we either print a normal
 * for node or an openmp parallel for node.
 */
static __isl_give isl_printer *print_for(__isl_take isl_printer *p,
        __isl_take isl_ast_print_options *print_options,
        __isl_keep isl_ast_node *node, void *user)
{
        isl_id *id;
        int openmp;

        openmp = 0;
        id = isl_ast_node_get_annotation(node);

        if (id) {
                struct ast_node_userinfo *info;

                info = (struct ast_node_userinfo *) isl_id_get_user(id);
                if (info && info->is_openmp)
                        openmp = 1;
        }

        if (openmp)
                p = print_for_with_openmp(node, p, print_options);
        else
                p = isl_ast_node_for_print(node, p, print_options);

        isl_id_free(id);

        return p;
}
/* Index transformation callback for pet_stmt_build_ast_exprs.
 *
 * "index" expresses the array indices in terms of statement iterators.
 * "iterator_map" expresses the statement iterators in terms of
 * AST loop iterators.
 *
 * The result expresses the array indices in terms of
 * AST loop iterators.
 */
static __isl_give isl_multi_pw_aff *pullback_index(
        __isl_take isl_multi_pw_aff *index, __isl_keep isl_id *id, void *user)
{
        isl_pw_multi_aff *iterator_map = user;

        iterator_map = isl_pw_multi_aff_copy(iterator_map);
        return isl_multi_pw_aff_pullback_pw_multi_aff(index, iterator_map);
}
/* Transform the accesses in the statement associated with the domain
 * called by "node" to refer to the AST loop iterators, construct
 * corresponding AST expressions using "build",
 * collect them in a ppcg_stmt and annotate the node with the ppcg_stmt.
 */
static __isl_give isl_ast_node *at_each_domain(__isl_take isl_ast_node *node,
        __isl_keep isl_ast_build *build, void *user)
{
        struct ppcg_scop *scop = user;
        isl_ast_expr *expr, *arg;
        isl_ctx *ctx;
        isl_id *id;
        isl_map *map;
        isl_pw_multi_aff *iterator_map;
        struct ppcg_stmt *stmt;

        ctx = isl_ast_node_get_ctx(node);
        stmt = isl_calloc_type(ctx, struct ppcg_stmt);
        if (!stmt)
                goto error;

        expr = isl_ast_node_user_get_expr(node);
        arg = isl_ast_expr_get_op_arg(expr, 0);
        isl_ast_expr_free(expr);
        id = isl_ast_expr_get_id(arg);
        isl_ast_expr_free(arg);
        stmt->stmt = find_stmt(scop, id);
        isl_id_free(id);
        if (!stmt->stmt)
                goto error;

        map = isl_map_from_union_map(isl_ast_build_get_schedule(build));
        map = isl_map_reverse(map);
        iterator_map = isl_pw_multi_aff_from_map(map);
        stmt->ref2expr = pet_stmt_build_ast_exprs(stmt->stmt, build,
                &pullback_index, iterator_map, NULL, NULL);
        isl_pw_multi_aff_free(iterator_map);

        id = isl_id_alloc(isl_ast_node_get_ctx(node), NULL, stmt);
        id = isl_id_set_free_user(id, &ppcg_stmt_free);
        return isl_ast_node_set_annotation(node, id);
error:
        ppcg_stmt_free(stmt);
        return isl_ast_node_free(node);
}
/* Set *depth (initialized to 0 by the caller) to the maximum
 * of the schedule depths of the leaf nodes for which this function is called.
 */
static isl_bool update_depth(__isl_keep isl_schedule_node *node, void *user)
{
        int *depth = user;
        int node_depth;

        if (isl_schedule_node_get_type(node) != isl_schedule_node_leaf)
                return isl_bool_true;
        node_depth = isl_schedule_node_get_schedule_depth(node);
        if (node_depth > *depth)
                *depth = node_depth;

        return isl_bool_false;
}
/* This function is called for each node in a CPU AST.
 * In case of a user node, print the macro definitions required
 * for printing the AST expressions in the annotation, if any.
 * For other nodes, return true such that descendants are also visited.
 *
 * In particular, print the macro definitions needed for the substitutions
 * of the original user statements.
 */
static isl_bool at_node(__isl_keep isl_ast_node *node, void *user)
{
        struct ppcg_stmt *stmt;
        isl_id *id;
        isl_printer **p = user;

        if (isl_ast_node_get_type(node) != isl_ast_node_user)
                return isl_bool_true;

        id = isl_ast_node_get_annotation(node);
        stmt = isl_id_get_user(id);
        isl_id_free(id);

        if (!stmt)
                return isl_bool_error;

        *p = ppcg_print_body_macros(*p, stmt->ref2expr);
        if (!*p)
                return isl_bool_error;

        return isl_bool_false;
}
/* Print the required macros for the CPU AST "node" to "p",
 * including those needed for the user statements inside the AST.
 */
static __isl_give isl_printer *cpu_print_macros(__isl_take isl_printer *p,
        __isl_keep isl_ast_node *node)
{
        if (isl_ast_node_foreach_descendant_top_down(node, &at_node, &p) < 0)
                return isl_printer_free(p);
        p = ppcg_print_macros(p, node);

        return p;
}
/* Code generate the scop 'scop' using "schedule"
 * and print the corresponding C code to 'p'.
 */
static __isl_give isl_printer *print_scop(struct ppcg_scop *scop,
        __isl_take isl_schedule *schedule, __isl_take isl_printer *p,
        struct ppcg_options *options)
{
        isl_ctx *ctx = isl_printer_get_ctx(p);
        isl_ast_build *build;
        isl_ast_print_options *print_options;
        isl_ast_node *tree;
        isl_id_list *iterators;
        struct ast_build_userinfo build_info;
        int depth;

        depth = 0;
        if (isl_schedule_foreach_schedule_node_top_down(schedule,
                        &update_depth, &depth) < 0)
                goto error;

        build = isl_ast_build_alloc(ctx);
        iterators = ppcg_scop_generate_names(scop, depth, "c");
        build = isl_ast_build_set_iterators(build, iterators);
        build = isl_ast_build_set_at_each_domain(build, &at_each_domain, scop);

        if (options->openmp) {
                build_info.scop = scop;
                build_info.in_parallel_for = 0;

                build = isl_ast_build_set_before_each_for(build,
                        &ast_build_before_for, &build_info);
                build = isl_ast_build_set_after_each_for(build,
                        &ast_build_after_for, &build_info);
        }

        tree = isl_ast_build_node_from_schedule(build, schedule);
        isl_ast_build_free(build);

        print_options = isl_ast_print_options_alloc(ctx);
        print_options = isl_ast_print_options_set_print_user(print_options,
                &print_user, NULL);
        print_options = isl_ast_print_options_set_print_for(print_options,
                &print_for, NULL);

        p = cpu_print_macros(p, tree);
        p = isl_ast_node_print(tree, p, print_options);

        isl_ast_node_free(tree);

        return p;
error:
        isl_schedule_free(schedule);
        return isl_printer_free(p);
}
/* Tile the band node "node" with tile sizes "sizes" and
 * mark all members of the resulting tile node as "atomic".
 */
static __isl_give isl_schedule_node *tile(__isl_take isl_schedule_node *node,
        __isl_take isl_multi_val *sizes)
{
        node = isl_schedule_node_band_tile(node, sizes);
        node = ppcg_set_schedule_node_type(node, isl_ast_loop_atomic);

        return node;
}
559 /* Tile "node", if it is a band node with at least 2 members.
560 * The tile sizes are set from the "tile_size" option.
562 static __isl_give isl_schedule_node
*tile_band(
563 __isl_take isl_schedule_node
*node
, void *user
)
565 struct ppcg_scop
*scop
= user
;
568 isl_multi_val
*sizes
;
570 if (isl_schedule_node_get_type(node
) != isl_schedule_node_band
)
573 n
= isl_schedule_node_band_n_member(node
);
577 space
= isl_schedule_node_band_get_space(node
);
578 sizes
= ppcg_multi_val_from_int(space
, scop
->options
->tile_size
);
580 return tile(node
, sizes
);
/* Construct schedule constraints from the dependences in ps
 * for the purpose of computing a schedule for a CPU.
 *
 * The proximity constraints are set to the flow dependences.
 *
 * If live-range reordering is allowed, then the conditional validity
 * constraints are set to the order dependences with the flow dependences
 * as condition.  That is, a live-range (flow dependence) will either be
 * local to an iteration of a band, or all adjacent order dependences
 * will be respected by the band.
 * The validity constraints are set to the union of the flow dependences
 * and the forced dependences, while the coincidence constraints
 * are set to the union of the flow dependences, the forced dependences and
 * the order dependences.
 *
 * If live-range reordering is not allowed, then both the validity
 * and the coincidence constraints are set to the union of the flow
 * dependences and the false dependences.
 *
 * Note that the coincidence constraints are only set when the "openmp"
 * option is set.  Even though the way openmp pragmas are introduced
 * does not rely on the coincident property of the schedule band members,
 * the coincidence constraints do affect the way the schedule is constructed,
 * such that more schedule dimensions should be detected as parallel
 * by ast_schedule_dim_is_parallel.
 * Since the order dependences are also taken into account by
 * ast_schedule_dim_is_parallel, they are also added to
 * the coincidence constraints.  If the openmp handling learns
 * how to privatize some memory, then the corresponding order
 * dependences can be removed from the coincidence constraints.
 */
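/* Schematically, the constraints set up below are:
 *
 *      proximity:                   flow
 *      with live-range reordering:  validity    = flow + forced
 *                                   coincidence = flow + forced + order
 *                                   conditional validity: order under flow
 *      without:                     validity    = flow + false
 *                                   coincidence = flow + false
 *
 * where the coincidence constraints are only set if "openmp" is enabled.
 */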
static __isl_give isl_schedule_constraints *construct_cpu_schedule_constraints(
        struct ppcg_scop *ps)
{
        isl_schedule_constraints *sc;
        isl_union_map *validity, *coincidence;

        sc = isl_schedule_constraints_on_domain(isl_union_set_copy(ps->domain));
        if (ps->options->live_range_reordering) {
                sc = isl_schedule_constraints_set_conditional_validity(sc,
                        isl_union_map_copy(ps->tagged_dep_flow),
                        isl_union_map_copy(ps->tagged_dep_order));
                validity = isl_union_map_copy(ps->dep_flow);
                validity = isl_union_map_union(validity,
                        isl_union_map_copy(ps->dep_forced));
                if (ps->options->openmp) {
                        coincidence = isl_union_map_copy(validity);
                        coincidence = isl_union_map_union(coincidence,
                                isl_union_map_copy(ps->dep_order));
                }
        } else {
                validity = isl_union_map_copy(ps->dep_flow);
                validity = isl_union_map_union(validity,
                        isl_union_map_copy(ps->dep_false));
                if (ps->options->openmp)
                        coincidence = isl_union_map_copy(validity);
        }
        if (ps->options->openmp)
                sc = isl_schedule_constraints_set_coincidence(sc, coincidence);
        sc = isl_schedule_constraints_set_validity(sc, validity);
        sc = isl_schedule_constraints_set_proximity(sc,
                isl_union_map_copy(ps->dep_flow));

        return sc;
}
/* Compute a schedule for the scop "ps".
 *
 * First derive the appropriate schedule constraints from the dependences
 * in "ps" and then compute a schedule from those schedule constraints.
 */
static __isl_give isl_schedule *compute_cpu_schedule(struct ppcg_scop *ps)
{
        isl_schedule_constraints *sc;
        isl_schedule *schedule;

        if (!ps)
                return NULL;

        sc = construct_cpu_schedule_constraints(ps);

        if (ps->options->debug->dump_schedule_constraints)
                isl_schedule_constraints_dump(sc);
        schedule = isl_schedule_constraints_compute_schedule(sc);

        return schedule;
}
/* Compute a new schedule for the scop "ps" if the reschedule option is set.
 * Otherwise, return a copy of the original schedule.
 */
static __isl_give isl_schedule *optionally_compute_schedule(void *user)
{
        struct ppcg_scop *ps = user;

        if (!ps)
                return NULL;
        if (!ps->options->reschedule)
                return isl_schedule_copy(ps->schedule);
        return compute_cpu_schedule(ps);
}
/* Compute a schedule based on the dependences in "ps" and
 * tile it if requested by the user.
 */
static __isl_give isl_schedule *get_schedule(struct ppcg_scop *ps,
        struct ppcg_options *options)
{
        isl_ctx *ctx;
        isl_schedule *schedule;

        if (!ps)
                return NULL;

        ctx = isl_union_set_get_ctx(ps->domain);
        schedule = ppcg_get_schedule(ctx, options,
                &optionally_compute_schedule, ps);
        if (ps->options->tile)
                schedule = isl_schedule_map_schedule_node_bottom_up(schedule,
                        &tile_band, ps);

        return schedule;
}
707 /* Generate CPU code for the scop "ps" using "schedule" and
708 * print the corresponding C code to "p", including variable declarations.
710 static __isl_give isl_printer
*print_cpu_with_schedule(
711 __isl_take isl_printer
*p
, struct ppcg_scop
*ps
,
712 __isl_take isl_schedule
*schedule
, struct ppcg_options
*options
)
717 p
= isl_printer_start_line(p
);
718 p
= isl_printer_print_str(p
, "/* ppcg generated CPU code */");
719 p
= isl_printer_end_line(p
);
721 p
= isl_printer_start_line(p
);
722 p
= isl_printer_end_line(p
);
724 p
= ppcg_set_macro_names(p
);
725 p
= ppcg_print_exposed_declarations(p
, ps
);
726 hidden
= ppcg_scop_any_hidden_declarations(ps
);
728 p
= ppcg_start_block(p
);
729 p
= ppcg_print_hidden_declarations(p
, ps
);
732 context
= isl_set_copy(ps
->context
);
733 context
= isl_set_from_params(context
);
734 schedule
= isl_schedule_insert_context(schedule
, context
);
735 if (options
->debug
->dump_final_schedule
)
736 isl_schedule_dump(schedule
);
737 p
= print_scop(ps
, schedule
, p
, options
);
739 p
= ppcg_end_block(p
);
744 /* Generate CPU code for the scop "ps" and print the corresponding C code
745 * to "p", including variable declarations.
747 __isl_give isl_printer
*print_cpu(__isl_take isl_printer
*p
,
748 struct ppcg_scop
*ps
, struct ppcg_options
*options
)
750 isl_schedule
*schedule
;
752 schedule
= isl_schedule_copy(ps
->schedule
);
753 return print_cpu_with_schedule(p
, ps
, schedule
, options
);
756 /* Generate CPU code for "scop" and print it to "p".
758 * First obtain a schedule for "scop" and then print code for "scop"
759 * using that schedule.
761 static __isl_give isl_printer
*generate(__isl_take isl_printer
*p
,
762 struct ppcg_scop
*scop
, struct ppcg_options
*options
)
764 isl_schedule
*schedule
;
766 schedule
= get_schedule(scop
, options
);
768 return print_cpu_with_schedule(p
, scop
, schedule
, options
);
/* Wrapper around generate for use as a ppcg_transform callback.
 */
static __isl_give isl_printer *print_cpu_wrap(__isl_take isl_printer *p,
        struct ppcg_scop *scop, void *user)
{
        struct ppcg_options *options = user;

        return generate(p, scop, options);
}
/* Transform the code in the file called "input" by replacing
 * all scops by corresponding CPU code and write the results to a file
 * called "output".
 */
int generate_cpu(isl_ctx *ctx, struct ppcg_options *options,
        const char *input, const char *output)
{
        FILE *output_file;
        int r;

        output_file = get_output_file(input, output);
        if (!output_file)
                return -1;

        r = ppcg_transform(ctx, input, output_file, options,
                &print_cpu_wrap, options);

        fclose(output_file);

        return r;
}