/* update pet to version 0.11.4
 * [ppcg.git] / cpu.c
 * blob 00334ed416023498269c2189b773ef46aea46a22
 */
/*
 * Copyright 2012 INRIA Paris-Rocquencourt
 * Copyright 2012 Ecole Normale Superieure
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Tobias Grosser, INRIA Paris-Rocquencourt,
 * Domaine de Voluceau, Rocquencourt, B.P. 105,
 * 78153 Le Chesnay Cedex France
 * and Sven Verdoolaege,
 * Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 */
#include <limits.h>
#include <stdio.h>
#include <string.h>

#include <isl/aff.h>
#include <isl/ctx.h>
#include <isl/flow.h>
#include <isl/map.h>
#include <isl/ast_build.h>
#include <isl/schedule.h>
#include <isl/schedule_node.h>
#include <pet.h>

#include "ppcg.h"
#include "ppcg_options.h"
#include "cpu.h"
#include "print.h"
#include "schedule.h"
#include "util.h"
/* Representation of a statement inside a generated AST.
 *
 * "stmt" refers to the original statement.
 * "ref2expr" maps the reference identifier of each access in
 * the statement to an AST expression that should be printed
 * at the place of the access.
 */
struct ppcg_stmt {
	struct pet_stmt *stmt;

	isl_id_to_ast_expr *ref2expr;
};
47 static void ppcg_stmt_free(void *user)
49 struct ppcg_stmt *stmt = user;
51 if (!stmt)
52 return;
54 isl_id_to_ast_expr_free(stmt->ref2expr);
56 free(stmt);
59 /* Derive the output file name from the input file name.
60 * 'input' is the entire path of the input file. The output
61 * is the file name plus the additional extension.
63 * We will basically replace everything after the last point
64 * with '.ppcg.c'. This means file.c becomes file.ppcg.c
66 static FILE *get_output_file(const char *input, const char *output)
68 char name[PATH_MAX];
69 const char *ext;
70 const char ppcg_marker[] = ".ppcg";
71 int len;
72 FILE *file;
74 len = ppcg_extract_base_name(name, input);
76 strcpy(name + len, ppcg_marker);
77 ext = strrchr(input, '.');
78 strcpy(name + len + sizeof(ppcg_marker) - 1, ext ? ext : ".c");
80 if (!output)
81 output = name;
83 file = fopen(output, "w");
84 if (!file) {
85 fprintf(stderr, "Unable to open '%s' for writing\n", output);
86 return NULL;
89 return file;
/* Data used to annotate for nodes in the ast.
 */
struct ast_node_userinfo {
	/* The for node is an openmp parallel for node. */
	int is_openmp;
};
/* Information used while building the ast.
 */
struct ast_build_userinfo {
	/* The current ppcg scop. */
	struct ppcg_scop *scop;

	/* Are we currently in a parallel for loop? */
	int in_parallel_for;

	/* The contraction of the entire schedule tree. */
	isl_union_pw_multi_aff *contraction;
};
112 /* Check if the current scheduling dimension is parallel.
114 * We check for parallelism by verifying that the loop does not carry any
115 * dependences.
117 * If any expansion nodes are present in the schedule tree,
118 * then they are assumed to be situated near the leaves of the schedule tree,
119 * underneath any node that may result in a for loop.
120 * In particular, these expansions may have been introduced
121 * by the call to isl_schedule_expand inside ppcg_compute_grouping_schedule.
122 * The dependence relations are formulated in terms of the expanded
123 * domains, while, by assumption, the partial schedule returned
124 * by isl_ast_build_get_schedule refers to the contracted domains.
125 * Plug in the contraction such that the schedule would also
126 * refer to the expanded domains.
127 * Note that if the schedule tree does not contain any expansions,
128 * then the contraction is an identity function.
130 * If the live_range_reordering option is set, then this currently
131 * includes the order dependences. In principle, non-zero order dependences
132 * could be allowed, but this would require privatization and/or expansion.
134 * Parallelism test: if the distance is zero in all outer dimensions, then it
135 * has to be zero in the current dimension as well.
136 * Implementation: first, translate dependences into time space, then force
137 * outer dimensions to be equal. If the distance is zero in the current
138 * dimension, then the loop is parallel.
139 * The distance is zero in the current dimension if it is a subset of a map
140 * with equal values for the current dimension.
142 static int ast_schedule_dim_is_parallel(__isl_keep isl_ast_build *build,
143 struct ast_build_userinfo *build_info)
145 struct ppcg_scop *scop = build_info->scop;
146 isl_union_map *schedule, *deps;
147 isl_map *schedule_deps, *test;
148 isl_space *schedule_space;
149 unsigned i, dimension, is_parallel;
151 schedule = isl_ast_build_get_schedule(build);
152 schedule = isl_union_map_preimage_domain_union_pw_multi_aff(schedule,
153 isl_union_pw_multi_aff_copy(build_info->contraction));
154 schedule_space = isl_ast_build_get_schedule_space(build);
156 dimension = isl_space_dim(schedule_space, isl_dim_out) - 1;
158 deps = isl_union_map_copy(scop->dep_flow);
159 deps = isl_union_map_union(deps, isl_union_map_copy(scop->dep_false));
160 if (scop->options->live_range_reordering) {
161 isl_union_map *order = isl_union_map_copy(scop->dep_order);
162 deps = isl_union_map_union(deps, order);
164 deps = isl_union_map_apply_range(deps, isl_union_map_copy(schedule));
165 deps = isl_union_map_apply_domain(deps, schedule);
167 if (isl_union_map_is_empty(deps)) {
168 isl_union_map_free(deps);
169 isl_space_free(schedule_space);
170 return 1;
173 schedule_deps = isl_map_from_union_map(deps);
175 for (i = 0; i < dimension; i++)
176 schedule_deps = isl_map_equate(schedule_deps, isl_dim_out, i,
177 isl_dim_in, i);
179 test = isl_map_universe(isl_map_get_space(schedule_deps));
180 test = isl_map_equate(test, isl_dim_out, dimension, isl_dim_in,
181 dimension);
182 is_parallel = isl_map_is_subset(schedule_deps, test);
184 isl_space_free(schedule_space);
185 isl_map_free(test);
186 isl_map_free(schedule_deps);
188 return is_parallel;
191 /* Mark a for node openmp parallel, if it is the outermost parallel for node.
193 static void mark_openmp_parallel(__isl_keep isl_ast_build *build,
194 struct ast_build_userinfo *build_info,
195 struct ast_node_userinfo *node_info)
197 if (build_info->in_parallel_for)
198 return;
200 if (ast_schedule_dim_is_parallel(build, build_info)) {
201 build_info->in_parallel_for = 1;
202 node_info->is_openmp = 1;
206 /* Allocate an ast_node_info structure and initialize it with default values.
208 static struct ast_node_userinfo *allocate_ast_node_userinfo()
210 struct ast_node_userinfo *node_info;
211 node_info = (struct ast_node_userinfo *)
212 malloc(sizeof(struct ast_node_userinfo));
213 node_info->is_openmp = 0;
214 return node_info;
/* Free an ast_node_info structure.
 * Used as the free callback of the isl_id annotating a for node.
 */
static void free_ast_node_userinfo(void *ptr)
{
	free(ptr);
}
226 /* This method is executed before the construction of a for node. It creates
227 * an isl_id that is used to annotate the subsequently generated ast for nodes.
229 * In this function we also run the following analyses:
231 * - Detection of openmp parallel loops
233 static __isl_give isl_id *ast_build_before_for(
234 __isl_keep isl_ast_build *build, void *user)
236 isl_id *id;
237 struct ast_build_userinfo *build_info;
238 struct ast_node_userinfo *node_info;
240 build_info = (struct ast_build_userinfo *) user;
241 node_info = allocate_ast_node_userinfo();
242 id = isl_id_alloc(isl_ast_build_get_ctx(build), "", node_info);
243 id = isl_id_set_free_user(id, free_ast_node_userinfo);
245 mark_openmp_parallel(build, build_info, node_info);
247 return id;
250 /* This method is executed after the construction of a for node.
252 * It performs the following actions:
254 * - Reset the 'in_parallel_for' flag, as soon as we leave a for node,
255 * that is marked as openmp parallel.
258 static __isl_give isl_ast_node *ast_build_after_for(
259 __isl_take isl_ast_node *node, __isl_keep isl_ast_build *build,
260 void *user)
262 isl_id *id;
263 struct ast_build_userinfo *build_info;
264 struct ast_node_userinfo *info;
266 id = isl_ast_node_get_annotation(node);
267 info = isl_id_get_user(id);
269 if (info && info->is_openmp) {
270 build_info = (struct ast_build_userinfo *) user;
271 build_info->in_parallel_for = 0;
274 isl_id_free(id);
276 return node;
/* Find the element in scop->stmts that has the given "id".
 *
 * Return NULL (after reporting an internal error) if no statement
 * with this tuple id exists.
 */
static struct pet_stmt *find_stmt(struct ppcg_scop *scop, __isl_keep isl_id *id)
{
	int i;

	for (i = 0; i < scop->pet->n_stmt; ++i) {
		struct pet_stmt *stmt = scop->pet->stmts[i];
		isl_id *id_i;

		id_i = isl_set_get_tuple_id(stmt->domain);
		/* Drop the fresh reference immediately; isl_ids are
		 * uniquified per context, so the pointer value can still be
		 * compared against "id" below — the statement's domain
		 * retains its own reference, keeping the id alive.
		 */
		isl_id_free(id_i);

		if (id_i == id)
			return stmt;
	}

	isl_die(isl_id_get_ctx(id), isl_error_internal,
		"statement not found", return NULL);
}
300 /* Print a user statement in the generated AST.
301 * The ppcg_stmt has been attached to the node in at_each_domain.
303 static __isl_give isl_printer *print_user(__isl_take isl_printer *p,
304 __isl_take isl_ast_print_options *print_options,
305 __isl_keep isl_ast_node *node, void *user)
307 struct ppcg_stmt *stmt;
308 isl_id *id;
310 id = isl_ast_node_get_annotation(node);
311 stmt = isl_id_get_user(id);
312 isl_id_free(id);
314 p = pet_stmt_print_body(stmt->stmt, p, stmt->ref2expr);
316 isl_ast_print_options_free(print_options);
318 return p;
322 /* Print a for loop node as an openmp parallel loop.
324 * To print an openmp parallel loop we print a normal for loop, but add
325 * "#pragma openmp parallel for" in front.
327 * Variables that are declared within the body of this for loop are
328 * automatically openmp 'private'. Iterators declared outside of the
329 * for loop are automatically openmp 'shared'. As ppcg declares all iterators
330 * at the position where they are assigned, there is no need to explicitly mark
331 * variables. Their automatically assigned type is already correct.
333 * This function only generates valid OpenMP code, if the ast was generated
334 * with the 'atomic-bounds' option enabled.
337 static __isl_give isl_printer *print_for_with_openmp(
338 __isl_keep isl_ast_node *node, __isl_take isl_printer *p,
339 __isl_take isl_ast_print_options *print_options)
341 p = isl_printer_start_line(p);
342 p = isl_printer_print_str(p, "#pragma omp parallel for");
343 p = isl_printer_end_line(p);
345 p = isl_ast_node_for_print(node, p, print_options);
347 return p;
350 /* Print a for node.
352 * Depending on how the node is annotated, we either print a normal
353 * for node or an openmp parallel for node.
355 static __isl_give isl_printer *print_for(__isl_take isl_printer *p,
356 __isl_take isl_ast_print_options *print_options,
357 __isl_keep isl_ast_node *node, void *user)
359 isl_id *id;
360 int openmp;
362 openmp = 0;
363 id = isl_ast_node_get_annotation(node);
365 if (id) {
366 struct ast_node_userinfo *info;
368 info = (struct ast_node_userinfo *) isl_id_get_user(id);
369 if (info && info->is_openmp)
370 openmp = 1;
373 if (openmp)
374 p = print_for_with_openmp(node, p, print_options);
375 else
376 p = isl_ast_node_for_print(node, p, print_options);
378 isl_id_free(id);
380 return p;
383 /* Index transformation callback for pet_stmt_build_ast_exprs.
385 * "index" expresses the array indices in terms of statement iterators
386 * "iterator_map" expresses the statement iterators in terms of
387 * AST loop iterators.
389 * The result expresses the array indices in terms of
390 * AST loop iterators.
392 static __isl_give isl_multi_pw_aff *pullback_index(
393 __isl_take isl_multi_pw_aff *index, __isl_keep isl_id *id, void *user)
395 isl_pw_multi_aff *iterator_map = user;
397 iterator_map = isl_pw_multi_aff_copy(iterator_map);
398 return isl_multi_pw_aff_pullback_pw_multi_aff(index, iterator_map);
/* Transform the accesses in the statement associated to the domain
 * called by "node" to refer to the AST loop iterators, construct
 * corresponding AST expressions using "build",
 * collect them in a ppcg_stmt and annotate the node with the ppcg_stmt.
 *
 * On any failure, free the node (and the partially built ppcg_stmt)
 * and return NULL.
 */
static __isl_give isl_ast_node *at_each_domain(__isl_take isl_ast_node *node,
	__isl_keep isl_ast_build *build, void *user)
{
	struct ppcg_scop *scop = user;
	isl_ast_expr *expr, *arg;
	isl_ctx *ctx;
	isl_id *id;
	isl_map *map;
	isl_pw_multi_aff *iterator_map;
	struct ppcg_stmt *stmt;

	ctx = isl_ast_node_get_ctx(node);
	stmt = isl_calloc_type(ctx, struct ppcg_stmt);
	if (!stmt)
		goto error;

	/* The user node's expression is a call; its first argument
	 * carries the identifier of the original pet statement.
	 */
	expr = isl_ast_node_user_get_expr(node);
	arg = isl_ast_expr_get_op_arg(expr, 0);
	isl_ast_expr_free(expr);
	id = isl_ast_expr_get_id(arg);
	isl_ast_expr_free(arg);
	stmt->stmt = find_stmt(scop, id);
	isl_id_free(id);
	if (!stmt->stmt)
		goto error;

	/* Invert the schedule to express the statement iterators
	 * in terms of the AST loop iterators, then build the
	 * access expressions via pullback_index.
	 */
	map = isl_map_from_union_map(isl_ast_build_get_schedule(build));
	map = isl_map_reverse(map);
	iterator_map = isl_pw_multi_aff_from_map(map);
	stmt->ref2expr = pet_stmt_build_ast_exprs(stmt->stmt, build,
				    &pullback_index, iterator_map, NULL, NULL);
	isl_pw_multi_aff_free(iterator_map);

	/* Attach the ppcg_stmt; ppcg_stmt_free runs when the last
	 * reference to the annotation id is dropped.
	 */
	id = isl_id_alloc(isl_ast_node_get_ctx(node), NULL, stmt);
	id = isl_id_set_free_user(id, &ppcg_stmt_free);
	return isl_ast_node_set_annotation(node, id);
error:
	ppcg_stmt_free(stmt);
	return isl_ast_node_free(node);
}
447 /* Set *depth (initialized to 0 by the caller) to the maximum
448 * of the schedule depths of the leaf nodes for which this function is called.
450 static isl_bool update_depth(__isl_keep isl_schedule_node *node, void *user)
452 int *depth = user;
453 int node_depth;
455 if (isl_schedule_node_get_type(node) != isl_schedule_node_leaf)
456 return isl_bool_true;
457 node_depth = isl_schedule_node_get_schedule_depth(node);
458 if (node_depth > *depth)
459 *depth = node_depth;
461 return isl_bool_false;
464 /* This function is called for each node in a CPU AST.
465 * In case of a user node, print the macro definitions required
466 * for printing the AST expressions in the annotation, if any.
467 * For other nodes, return true such that descendants are also
468 * visited.
470 * In particular, print the macro definitions needed for the substitutions
471 * of the original user statements.
473 static isl_bool at_node(__isl_keep isl_ast_node *node, void *user)
475 struct ppcg_stmt *stmt;
476 isl_id *id;
477 isl_printer **p = user;
479 if (isl_ast_node_get_type(node) != isl_ast_node_user)
480 return isl_bool_true;
482 id = isl_ast_node_get_annotation(node);
483 stmt = isl_id_get_user(id);
484 isl_id_free(id);
486 if (!stmt)
487 return isl_bool_error;
489 *p = ppcg_print_body_macros(*p, stmt->ref2expr);
490 if (!*p)
491 return isl_bool_error;
493 return isl_bool_false;
496 /* Print the required macros for the CPU AST "node" to "p",
497 * including those needed for the user statements inside the AST.
499 static __isl_give isl_printer *cpu_print_macros(__isl_take isl_printer *p,
500 __isl_keep isl_ast_node *node)
502 if (isl_ast_node_foreach_descendant_top_down(node, &at_node, &p) < 0)
503 return isl_printer_free(p);
504 p = ppcg_print_macros(p, node);
505 return p;
508 /* Initialize the fields of "build_info".
510 * Initially, the AST generation is not inside any parallel for loop.
512 * The contraction of the entire schedule tree is extracted
513 * right underneath the root node.
515 static isl_stat init_build_info(struct ast_build_userinfo *build_info,
516 struct ppcg_scop *scop, __isl_keep isl_schedule *schedule)
518 isl_schedule_node *node = isl_schedule_get_root(schedule);
519 node = isl_schedule_node_child(node, 0);
521 build_info->scop = scop;
522 build_info->in_parallel_for = 0;
523 build_info->contraction =
524 isl_schedule_node_get_subtree_contraction(node);
526 isl_schedule_node_free(node);
528 return isl_stat_non_null(build_info->contraction);
531 /* Clear all memory allocated by "build_info".
533 static void clear_build_info(struct ast_build_userinfo *build_info)
535 isl_union_pw_multi_aff_free(build_info->contraction);
/* Code generate the scop 'scop' using "schedule"
 * and print the corresponding C code to 'p'.
 *
 * Takes ownership of "schedule" and "p"; returns NULL on error.
 */
static __isl_give isl_printer *print_scop(struct ppcg_scop *scop,
	__isl_take isl_schedule *schedule, __isl_take isl_printer *p,
	struct ppcg_options *options)
{
	isl_ctx *ctx = isl_printer_get_ctx(p);
	isl_ast_build *build;
	isl_ast_print_options *print_options;
	isl_ast_node *tree;
	isl_id_list *iterators;
	struct ast_build_userinfo build_info;
	int depth;

	/* Determine the deepest schedule depth so that enough AST
	 * iterator names ("c0", "c1", ...) can be generated.
	 */
	depth = 0;
	if (isl_schedule_foreach_schedule_node_top_down(schedule, &update_depth,
						&depth) < 0)
		goto error;

	build = isl_ast_build_alloc(ctx);
	iterators = ppcg_scop_generate_names(scop, depth, "c");
	build = isl_ast_build_set_iterators(build, iterators);
	build = isl_ast_build_set_at_each_domain(build, &at_each_domain, scop);

	/* When generating OpenMP pragmas, install callbacks that detect
	 * the outermost parallel for loops during AST construction.
	 * On init failure, free "build" so the subsequent isl calls
	 * propagate the error.
	 */
	if (options->openmp) {
		if (init_build_info(&build_info, scop, schedule) < 0)
			build = isl_ast_build_free(build);

		build = isl_ast_build_set_before_each_for(build,
							&ast_build_before_for,
							&build_info);
		build = isl_ast_build_set_after_each_for(build,
							&ast_build_after_for,
							&build_info);
	}

	tree = isl_ast_build_node_from_schedule(build, schedule);
	isl_ast_build_free(build);

	if (options->openmp)
		clear_build_info(&build_info);

	print_options = isl_ast_print_options_alloc(ctx);
	print_options = isl_ast_print_options_set_print_user(print_options,
							&print_user, NULL);
	print_options = isl_ast_print_options_set_print_for(print_options,
							&print_for, NULL);

	p = cpu_print_macros(p, tree);
	p = isl_ast_node_print(tree, p, print_options);

	isl_ast_node_free(tree);

	return p;
error:
	isl_schedule_free(schedule);
	isl_printer_free(p);
	return NULL;
}
600 /* Tile the band node "node" with tile sizes "sizes" and
601 * mark all members of the resulting tile node as "atomic".
603 static __isl_give isl_schedule_node *tile(__isl_take isl_schedule_node *node,
604 __isl_take isl_multi_val *sizes)
606 node = isl_schedule_node_band_tile(node, sizes);
607 node = ppcg_set_schedule_node_type(node, isl_ast_loop_atomic);
609 return node;
612 /* Tile "node", if it is a band node with at least 2 members.
613 * The tile sizes are set from the "tile_size" option.
615 static __isl_give isl_schedule_node *tile_band(
616 __isl_take isl_schedule_node *node, void *user)
618 struct ppcg_scop *scop = user;
619 int n;
620 isl_space *space;
621 isl_multi_val *sizes;
623 if (isl_schedule_node_get_type(node) != isl_schedule_node_band)
624 return node;
626 n = isl_schedule_node_band_n_member(node);
627 if (n <= 1)
628 return node;
630 space = isl_schedule_node_band_get_space(node);
631 sizes = ppcg_multi_val_from_int(space, scop->options->tile_size);
633 return tile(node, sizes);
/* Construct schedule constraints from the dependences in ps
 * for the purpose of computing a schedule for a CPU.
 *
 * The proximity constraints are set to the flow dependences.
 *
 * If live-range reordering is allowed then the conditional validity
 * constraints are set to the order dependences with the flow dependences
 * as condition.  That is, a live-range (flow dependence) will be either
 * local to an iteration of a band or all adjacent order dependences
 * will be respected by the band.
 * The validity constraints are set to the union of the flow dependences
 * and the forced dependences, while the coincidence constraints
 * are set to the union of the flow dependences, the forced dependences and
 * the order dependences.
 *
 * If live-range reordering is not allowed, then both the validity
 * and the coincidence constraints are set to the union of the flow
 * dependences and the false dependences.
 *
 * Note that the coincidence constraints are only set when the "openmp"
 * options is set.  Even though the way openmp pragmas are introduced
 * does not rely on the coincident property of the schedule band members,
 * the coincidence constraints do affect the way the schedule is constructed,
 * such that more schedule dimensions should be detected as parallel
 * by ast_schedule_dim_is_parallel.
 * Since the order dependences are also taken into account by
 * ast_schedule_dim_is_parallel, they are also added to
 * the coincidence constraints.  If the openmp handling learns
 * how to privatize some memory, then the corresponding order
 * dependences can be removed from the coincidence constraints.
 */
static __isl_give isl_schedule_constraints *construct_cpu_schedule_constraints(
	struct ppcg_scop *ps)
{
	isl_schedule_constraints *sc;
	/* "coincidence" is only assigned — and only read — when
	 * ps->options->openmp is set; both branches below honor this.
	 */
	isl_union_map *validity, *coincidence;

	sc = isl_schedule_constraints_on_domain(isl_union_set_copy(ps->domain));
	if (ps->options->live_range_reordering) {
		sc = isl_schedule_constraints_set_conditional_validity(sc,
				isl_union_map_copy(ps->tagged_dep_flow),
				isl_union_map_copy(ps->tagged_dep_order));
		validity = isl_union_map_copy(ps->dep_flow);
		validity = isl_union_map_union(validity,
				isl_union_map_copy(ps->dep_forced));
		if (ps->options->openmp) {
			coincidence = isl_union_map_copy(validity);
			coincidence = isl_union_map_union(coincidence,
					isl_union_map_copy(ps->dep_order));
		}
	} else {
		validity = isl_union_map_copy(ps->dep_flow);
		validity = isl_union_map_union(validity,
				isl_union_map_copy(ps->dep_false));
		if (ps->options->openmp)
			coincidence = isl_union_map_copy(validity);
	}
	if (ps->options->openmp)
		sc = isl_schedule_constraints_set_coincidence(sc, coincidence);
	sc = isl_schedule_constraints_set_validity(sc, validity);
	sc = isl_schedule_constraints_set_proximity(sc,
					isl_union_map_copy(ps->dep_flow));

	return sc;
}
702 /* Compute a schedule for the scop "ps".
704 * First derive the appropriate schedule constraints from the dependences
705 * in "ps" and then compute a schedule from those schedule constraints,
706 * possibly grouping statement instances based on the input schedule.
708 static __isl_give isl_schedule *compute_cpu_schedule(struct ppcg_scop *ps)
710 isl_schedule_constraints *sc;
711 isl_schedule *schedule;
713 if (!ps)
714 return NULL;
716 sc = construct_cpu_schedule_constraints(ps);
718 schedule = ppcg_compute_schedule(sc, ps->schedule, ps->options);
720 return schedule;
723 /* Compute a new schedule to the scop "ps" if the reschedule option is set.
724 * Otherwise, return a copy of the original schedule.
726 static __isl_give isl_schedule *optionally_compute_schedule(void *user)
728 struct ppcg_scop *ps = user;
730 if (!ps)
731 return NULL;
732 if (!ps->options->reschedule)
733 return isl_schedule_copy(ps->schedule);
734 return compute_cpu_schedule(ps);
737 /* Compute a schedule based on the dependences in "ps" and
738 * tile it if requested by the user.
740 static __isl_give isl_schedule *get_schedule(struct ppcg_scop *ps,
741 struct ppcg_options *options)
743 isl_ctx *ctx;
744 isl_schedule *schedule;
746 if (!ps)
747 return NULL;
749 ctx = isl_union_set_get_ctx(ps->domain);
750 schedule = ppcg_get_schedule(ctx, options,
751 &optionally_compute_schedule, ps);
752 if (ps->options->tile)
753 schedule = isl_schedule_map_schedule_node_bottom_up(schedule,
754 &tile_band, ps);
756 return schedule;
759 /* Generate CPU code for the scop "ps" using "schedule" and
760 * print the corresponding C code to "p", including variable declarations.
762 static __isl_give isl_printer *print_cpu_with_schedule(
763 __isl_take isl_printer *p, struct ppcg_scop *ps,
764 __isl_take isl_schedule *schedule, struct ppcg_options *options)
766 int hidden;
767 isl_set *context;
769 p = isl_printer_start_line(p);
770 p = isl_printer_print_str(p, "/* ppcg generated CPU code */");
771 p = isl_printer_end_line(p);
773 p = isl_printer_start_line(p);
774 p = isl_printer_end_line(p);
776 p = ppcg_set_macro_names(p);
777 p = ppcg_print_exposed_declarations(p, ps);
778 hidden = ppcg_scop_any_hidden_declarations(ps);
779 if (hidden) {
780 p = ppcg_start_block(p);
781 p = ppcg_print_hidden_declarations(p, ps);
784 context = isl_set_copy(ps->context);
785 context = isl_set_from_params(context);
786 schedule = isl_schedule_insert_context(schedule, context);
787 if (options->debug->dump_final_schedule)
788 isl_schedule_dump(schedule);
789 p = print_scop(ps, schedule, p, options);
790 if (hidden)
791 p = ppcg_end_block(p);
793 return p;
796 /* Generate CPU code for the scop "ps" and print the corresponding C code
797 * to "p", including variable declarations.
799 __isl_give isl_printer *print_cpu(__isl_take isl_printer *p,
800 struct ppcg_scop *ps, struct ppcg_options *options)
802 isl_schedule *schedule;
804 schedule = isl_schedule_copy(ps->schedule);
805 return print_cpu_with_schedule(p, ps, schedule, options);
808 /* Generate CPU code for "scop" and print it to "p".
810 * First obtain a schedule for "scop" and then print code for "scop"
811 * using that schedule.
813 static __isl_give isl_printer *generate(__isl_take isl_printer *p,
814 struct ppcg_scop *scop, struct ppcg_options *options)
816 isl_schedule *schedule;
818 schedule = get_schedule(scop, options);
820 return print_cpu_with_schedule(p, scop, schedule, options);
823 /* Wrapper around generate for use as a ppcg_transform callback.
825 static __isl_give isl_printer *print_cpu_wrap(__isl_take isl_printer *p,
826 struct ppcg_scop *scop, void *user)
828 struct ppcg_options *options = user;
830 return generate(p, scop, options);
833 /* Transform the code in the file called "input" by replacing
834 * all scops by corresponding CPU code and write the results to a file
835 * called "output".
837 int generate_cpu(isl_ctx *ctx, struct ppcg_options *options,
838 const char *input, const char *output)
840 FILE *output_file;
841 int r;
843 output_file = get_output_file(input, output);
844 if (!output_file)
845 return -1;
847 r = ppcg_transform(ctx, input, output_file, options,
848 &print_cpu_wrap, options);
850 fclose(output_file);
852 return r;