/*
 * Copyright 2010-2011 INRIA Saclay
 * Copyright 2012-2014 Ecole Normale Superieure
 * Copyright 2015      Sven Verdoolaege
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
 * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
 * and Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 */
#include <isl/constraint.h>

#include "gpu_array_tile.h"
#include "gpu_group.h"
/* Print the name of the local copy of a given group of array references.
 */
__isl_give isl_printer *gpu_array_ref_group_print_name(
	struct gpu_array_ref_group *group, __isl_take isl_printer *p)
{
	int global = 0;
	enum ppcg_group_access_type type;

	type = gpu_array_ref_group_type(group);
	if (type == ppcg_access_private)
		p = isl_printer_print_str(p, "private_");
	else if (type == ppcg_access_shared)
		p = isl_printer_print_str(p, "shared_");
	else
		global = 1;
	p = isl_printer_print_str(p, group->array->name);
	if (!global && group->local_array->n_group > 1) {
		p = isl_printer_print_str(p, "_");
		p = isl_printer_print_int(p, group->nr);
	}

	return p;
}
/* Return the union of all read (read = 1) and/or write (write = 1)
 * access relations in the group.
 */
__isl_give isl_union_map *gpu_array_ref_group_access_relation(
	struct gpu_array_ref_group *group, int read, int write)
{
	int i;
	isl_union_map *access;

	access = isl_union_map_empty(isl_map_get_space(group->access));
	for (i = 0; i < group->n_ref; ++i) {
		isl_map *map_i;

		if (!((read && group->refs[i]->read) ||
		      (write && group->refs[i]->write)))
			continue;
		map_i = isl_map_copy(group->refs[i]->access);
		access = isl_union_map_union(access,
					isl_union_map_from_map(map_i));
	}

	return access;
}
/* Should this array reference group be mapped to private, shared or global
 * memory?
 * If we have computed both a private and a shared tile, then
 * the tile with the smallest depth is used.  If both have the same depth,
 * then the private tile is used.
 */
enum ppcg_group_access_type gpu_array_ref_group_type(
	struct gpu_array_ref_group *group)
{
	if (group->private_tile && group->shared_tile &&
	    group->shared_tile->depth < group->private_tile->depth)
		return ppcg_access_shared;
	if (group->private_tile)
		return ppcg_access_private;
	if (group->shared_tile)
		return ppcg_access_shared;
	return ppcg_access_global;
}
/* Return the effective gpu_array_tile associated to "group" or
 * NULL if there is no such gpu_array_tile.
 */
struct gpu_array_tile *gpu_array_ref_group_tile(
	struct gpu_array_ref_group *group)
{
	switch (gpu_array_ref_group_type(group)) {
	case ppcg_access_global:
		return NULL;
	case ppcg_access_shared:
		return group->shared_tile;
	case ppcg_access_private:
		return group->private_tile;
	}
}
/* Does the tile associated to "group" require unrolling of the schedule
 * dimensions mapped to threads?
 * Note that this can only happen for private tiles.
 */
int gpu_array_ref_group_requires_unroll(struct gpu_array_ref_group *group)
{
	struct gpu_array_tile *tile;

	tile = gpu_array_ref_group_tile(group);
	if (!tile)
		return 0;
	return tile->requires_unroll;
}
/* Given a constraint
 *
 *	a(p,i) + j = g f(e)
 *
 * or -a(p,i) - j = g f(e) if sign < 0,
 * store a(p,i) in bound->shift and g (stride) in bound->stride.
 * a(p,i) is assumed to be an expression in only the parameters
 * and the input dimensions.
 */
static void extract_stride(__isl_keep isl_constraint *c,
	struct gpu_array_bound *bound, __isl_keep isl_val *stride, int sign)
{
	int i;
	isl_val *v;
	isl_space *space;
	unsigned nparam;
	unsigned nvar;
	isl_aff *aff;

	isl_val_free(bound->stride);
	bound->stride = isl_val_copy(stride);

	space = isl_constraint_get_space(c);
	space = isl_space_domain(space);

	nparam = isl_space_dim(space, isl_dim_param);
	nvar = isl_space_dim(space, isl_dim_set);

	v = isl_constraint_get_constant_val(c);
	if (sign < 0)
		v = isl_val_neg(v);
	aff = isl_aff_zero_on_domain(isl_local_space_from_space(space));
	aff = isl_aff_set_constant_val(aff, v);

	for (i = 0; i < nparam; ++i) {
		if (!isl_constraint_involves_dims(c, isl_dim_param, i, 1))
			continue;
		v = isl_constraint_get_coefficient_val(c, isl_dim_param, i);
		if (sign < 0)
			v = isl_val_neg(v);
		aff = isl_aff_add_coefficient_val(aff, isl_dim_param, i, v);
	}

	for (i = 0; i < nvar; ++i) {
		if (!isl_constraint_involves_dims(c, isl_dim_in, i, 1))
			continue;
		v = isl_constraint_get_coefficient_val(c, isl_dim_in, i);
		if (sign < 0)
			v = isl_val_neg(v);
		aff = isl_aff_add_coefficient_val(aff, isl_dim_in, i, v);
	}

	bound->shift = aff;
}
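
/* Illustrative example (hypothetical constraint, for exposition only):
 * for the equality constraint
 *
 *	i0 + j - 4 e0 = 0
 *
 * with i0 an input dimension, j the single output dimension and e0 an
 * existentially quantified variable, extract_stride records the stride
 * g = 4 in bound->stride and the shift a(p,i) = i0 in bound->shift.
 */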
/* Given an equality constraint of a map with a single output dimension j,
 * check if the constraint is of the form
 *
 *	a(p,i) + j = g f(e)
 *
 * with a(p,i) an expression in the parameters and input dimensions
 * and f(e) an expression in the existentially quantified variables.
 * If so, and if g is larger than any such g from a previously considered
 * constraint, then call extract_stride to record the stride information
 * in bound.
 */
static isl_stat check_stride_constraint(__isl_take isl_constraint *c,
	void *user)
{
	int i;
	isl_ctx *ctx;
	isl_val *v;
	unsigned n_div;
	struct gpu_array_bound *bound = user;

	ctx = isl_constraint_get_ctx(c);
	n_div = isl_constraint_dim(c, isl_dim_div);
	v = isl_constraint_get_coefficient_val(c, isl_dim_out, 0);

	if (n_div && (isl_val_is_one(v) || isl_val_is_negone(v))) {
		int s = isl_val_sgn(v);
		isl_val *stride = isl_val_zero(ctx);

		isl_val_free(v);
		for (i = 0; i < n_div; ++i) {
			v = isl_constraint_get_coefficient_val(c,
							isl_dim_div, i);
			stride = isl_val_gcd(stride, v);
		}
		if (!isl_val_is_zero(stride) &&
		    isl_val_gt(stride, bound->stride))
			extract_stride(c, bound, stride, s);

		isl_val_free(stride);
	} else
		isl_val_free(v);

	isl_constraint_free(c);

	return isl_stat_ok;
}
/* Given constraints on an array index i, check if we can find
 * a shift a(p) and a stride g such that
 *
 *	a(p) + i = 0 mod g
 *
 * If so, record the information in bound and apply the mapping
 * i -> (i + a(p))/g to the array index in bounds and return
 * the new constraints.
 * If not, simply return the original constraints.
 *
 * If bounds is a subset of the space
 *
 *	D -> i
 *
 * then the bound recorded in bound->shift is of the form
 *
 *	D -> s(D)
 *
 * with s(D) equal to a(p) above.
 * Next, we construct a mapping of the form
 *
 *	[D -> i] -> [D -> (i + s(D))/g]
 *
 * This mapping is computed as follows.
 * We first introduce "i" in the domain through precomposition
 * with [D -> i] -> D obtaining
 *
 *	[D -> i] -> s(D)
 *
 * Adding [D -> i] -> i produces
 *
 *	[D -> i] -> i + s(D)
 *
 * and the domain product with [D -> i] -> D yields
 *
 *	[D -> i] -> [D -> i + s(D)]
 *
 * Composition with [D -> i] -> [D -> i/g] gives the desired result.
 */
static __isl_give isl_basic_map *check_stride(struct gpu_array_bound *bound,
	__isl_take isl_basic_map *bounds)
{
	isl_space *space;
	isl_basic_map *hull;
	isl_basic_map *shift, *id, *bmap, *scale;
	isl_basic_set *bset;
	isl_aff *aff;

	bound->stride = NULL;

	hull = isl_basic_map_affine_hull(isl_basic_map_copy(bounds));

	isl_basic_map_foreach_constraint(hull, &check_stride_constraint, bound);

	isl_basic_map_free(hull);

	if (!bound->stride)
		return bounds;

	shift = isl_basic_map_from_aff(isl_aff_copy(bound->shift));
	space = isl_basic_map_get_space(bounds);
	bmap = isl_basic_map_domain_map(isl_basic_map_universe(space));
	shift = isl_basic_map_apply_range(bmap, shift);
	space = isl_basic_map_get_space(bounds);
	id = isl_basic_map_range_map(isl_basic_map_universe(space));
	shift = isl_basic_map_sum(id, shift);
	space = isl_basic_map_get_space(bounds);
	id = isl_basic_map_domain_map(isl_basic_map_universe(space));
	shift = isl_basic_map_range_product(id, shift);

	space = isl_space_domain(isl_basic_map_get_space(bounds));
	id = isl_basic_map_identity(isl_space_map_from_set(space));
	space = isl_space_range(isl_basic_map_get_space(bounds));
	aff = isl_aff_zero_on_domain(isl_local_space_from_space(space));
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, 0, 1);
	aff = isl_aff_scale_down_val(aff, isl_val_copy(bound->stride));
	scale = isl_basic_map_from_aff(aff);
	scale = isl_basic_map_product(id, scale);

	bmap = isl_basic_map_apply_range(shift, scale);
	bset = isl_basic_set_apply(isl_basic_map_wrap(bounds), bmap);
	bounds = isl_basic_set_unwrap(bset);

	return bounds;
}
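
/* Illustrative example (hypothetical input, for exposition only):
 * if the constraints in "bounds" imply
 *
 *	i0 + i = 0 mod 4
 *
 * for the array index i, then bound->stride is set to 4,
 * bound->shift to i0, and the returned constraints describe
 * the transformed index (i + i0)/4 instead of i.
 */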
/* Data used in compute_array_dim_size and compute_size_in_direction.
 *
 * pos is the position of the variable representing the array index,
 * i.e., the variable for which we want to compute the size.
 * This variable is also the last variable in the set.
 */
struct gpu_size_info {
	isl_basic_set *bset;
	struct gpu_array_bound *bound;
	int pos;
};
/* Given a constraint from the basic set describing the bounds on
 * an array index, check if it is a lower bound, say m i >= b(x), and,
 * if so, check whether the expression "i - ceil(b(x)/m) + 1" has a constant
 * upper bound.  If so, and if this bound is smaller than any bound
 * derived from earlier constraints, set the size to this bound on
 * the expression and the lower bound to ceil(b(x)/m).
 */
static isl_stat compute_size_in_direction(__isl_take isl_constraint *c,
	void *user)
{
	struct gpu_size_info *size = user;
	unsigned nparam;
	unsigned n_div;
	isl_val *v;
	isl_aff *aff;
	isl_aff *lb;

	nparam = isl_basic_set_dim(size->bset, isl_dim_param);
	n_div = isl_constraint_dim(c, isl_dim_div);

	if (isl_constraint_involves_dims(c, isl_dim_div, 0, n_div) ||
	    !isl_constraint_is_lower_bound(c, isl_dim_set, size->pos)) {
		isl_constraint_free(c);
		return isl_stat_ok;
	}

	aff = isl_constraint_get_bound(c, isl_dim_set, size->pos);
	aff = isl_aff_ceil(aff);

	lb = isl_aff_copy(aff);

	aff = isl_aff_neg(aff);
	aff = isl_aff_add_coefficient_si(aff, isl_dim_in, size->pos, 1);

	v = isl_basic_set_max_val(size->bset, aff);
	isl_aff_free(aff);

	if (isl_val_is_int(v)) {
		v = isl_val_add_ui(v, 1);
		if (!size->bound->size || isl_val_lt(v, size->bound->size)) {
			isl_val_free(size->bound->size);
			size->bound->size = isl_val_copy(v);
			lb = isl_aff_drop_dims(lb, isl_dim_in, size->pos, 1);
			isl_aff_free(size->bound->lb);
			size->bound->lb = isl_aff_copy(lb);
		}
	}
	isl_val_free(v);
	isl_aff_free(lb);

	isl_constraint_free(c);

	return isl_stat_ok;
}
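
/* Illustrative example (hypothetical constraints, for exposition only):
 * for the lower bound i >= 2 n (m = 1, b(x) = 2 n) combined with an
 * upper bound i <= 2 n + 31, the expression i - 2 n + 1 has constant
 * upper bound 32, so compute_size_in_direction sets the size to 32
 * and the lower bound to 2 n.
 */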
/* Given a basic map "bounds" that maps parameters and input dimensions
 * to a single output dimension, look for an expression in the parameters
 * and input dimensions such that the range of the output dimension shifted
 * by this expression is a constant.
 *
 * In particular, we currently only consider lower bounds on the output
 * dimension as candidate expressions.
 */
static int compute_array_dim_size(struct gpu_array_bound *bound,
	__isl_take isl_basic_map *bounds)
{
	struct gpu_size_info size;

	bounds = isl_basic_map_detect_equalities(bounds);
	bounds = check_stride(bound, bounds);

	bound->size = NULL;
	bound->lb = NULL;

	size.bound = bound;
	size.pos = isl_basic_map_dim(bounds, isl_dim_in);
	size.bset = isl_basic_map_wrap(bounds);
	size.bset = isl_basic_set_flatten(size.bset);
	size.bset = isl_set_simple_hull(isl_basic_set_compute_divs(size.bset));
	isl_basic_set_foreach_constraint(size.bset, &compute_size_in_direction,
					&size);
	isl_basic_set_free(size.bset);

	return bound->size ? 0 : -1;
}
/* Check if we can find a memory tile for the given array
 * based on the given accesses, and if so, put the results in "tile".
 *
 * We project the accesses on each index in turn and look for a parametric
 * offset such that the size is constant.
 *
 * tile->depth is initialized to the input dimension of the computed bounds.
 */
static int can_tile(__isl_keep isl_map *access, struct gpu_array_tile *tile)
{
	int i;

	tile->depth = isl_map_dim(access, isl_dim_in);

	for (i = 0; i < tile->n; ++i) {
		isl_map *access_i;
		isl_basic_map *hull;

		access_i = isl_map_copy(access);
		access_i = isl_map_project_out(access_i, isl_dim_out, 0, i);
		access_i = isl_map_project_out(access_i, isl_dim_out,
					1, tile->n - (i + 1));
		access_i = isl_map_compute_divs(access_i);
		hull = isl_map_simple_hull(access_i);
		if (compute_array_dim_size(&tile->bound[i], hull) < 0)
			return -1;
	}

	return 0;
}
/* Internal data structure for gpu_group_references.
 *
 * scop represents the input scop.
 * kernel_depth is the schedule depth where the kernel launch will
 * be introduced, i.e., it is the depth of the band that is mapped
 * to blocks.
 * shared_depth is the schedule depth at which the copying to/from
 * shared memory is computed.  The copy operation may then
 * later be hoisted to a higher level.
 * thread_depth is the schedule depth where the thread mark is located,
 * i.e., it is the depth of the band that is mapped to threads and also
 * the schedule depth at which the copying to/from private memory
 * is computed.  The copy operation may then later be hoisted to
 * a higher level.
 * n_thread is the number of schedule dimensions in the band that
 * is mapped to threads.
 * privatization lives in the range of thread_sched (i.e., it is
 * of dimension thread_depth + n_thread) and encodes the mapping
 * to thread identifiers (as parameters).
 * host_sched contains the kernel_depth dimensions of the host schedule.
 * shared_sched contains the first shared_depth dimensions of the
 * kernel schedule.
 * copy_sched contains the first thread_depth dimensions of the
 * kernel schedule.
 * thread_sched contains the first (thread_depth + n_thread) dimensions
 * of the kernel schedule.
 * full_sched is a union_map representation of the entire kernel schedule.
 * The schedules are all formulated in terms of the original statement
 * instances, i.e., those that appear in the domains of the access
 * relations.
 */
struct gpu_group_data {
	struct ppcg_scop *scop;
	int kernel_depth;
	int shared_depth;
	int thread_depth;
	int n_thread;
	isl_set *privatization;
	isl_union_map *host_sched;
	isl_union_map *shared_sched;
	isl_union_map *copy_sched;
	isl_union_map *thread_sched;
	isl_union_map *full_sched;
};
/* Construct a map from domain_space to domain_space that increments
 * the dimension at position "pos" and leaves all other dimensions
 * constant.
 */
static __isl_give isl_map *next(__isl_take isl_space *domain_space, int pos)
{
	isl_space *space;
	isl_aff *aff;
	isl_multi_aff *next;

	space = isl_space_map_from_set(domain_space);
	next = isl_multi_aff_identity(space);
	aff = isl_multi_aff_get_aff(next, pos);
	aff = isl_aff_add_constant_si(aff, 1);
	next = isl_multi_aff_set_aff(next, pos, aff);

	return isl_map_from_multi_aff(next);
}
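
/* Illustrative example (hypothetical space, for exposition only):
 * for a two-dimensional domain space { [a, b] } and pos = 1,
 * the constructed map is
 *
 *	{ [a, b] -> [a, b + 1] }
 */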
/* Check if the given access is coalesced (or if there is no point
 * in trying to coalesce the access by mapping the array to shared memory).
 * That is, check whether incrementing the dimension that will get
 * wrapped over the last thread index results in incrementing
 * the last array index.
 *
 * If no two consecutive array elements are ever accessed by "access",
 * then mapping the corresponding array to shared memory will not
 * improve coalescing.  In fact, the copying will likely be performed
 * by a single thread.  Consider the access as coalesced such that
 * the caller will not try and map the array to shared memory just
 * to improve coalescing.
 *
 * This function is only called for access relations without reuse and
 * kernels with at least one thread identifier.
 */
static int access_is_coalesced(struct gpu_group_data *data,
	__isl_keep isl_union_map *access)
{
	int dim;
	isl_space *space;
	isl_set *accessed;
	isl_map *access_map;
	isl_map *next_thread_x;
	isl_map *next_element;
	isl_map *map;
	int coalesced, empty;

	access = isl_union_map_copy(access);
	access = isl_union_map_apply_domain(access,
				isl_union_map_copy(data->full_sched));
	access_map = isl_map_from_union_map(access);

	space = isl_map_get_space(access_map);
	space = isl_space_range(space);
	dim = isl_space_dim(space, isl_dim_set);
	if (dim == 0)
		next_element = isl_map_empty(isl_space_map_from_set(space));
	else
		next_element = next(space, dim - 1);

	accessed = isl_map_range(isl_map_copy(access_map));
	map = isl_map_copy(next_element);
	map = isl_map_intersect_domain(map, isl_set_copy(accessed));
	map = isl_map_intersect_range(map, accessed);
	empty = isl_map_is_empty(map);
	isl_map_free(map);

	if (empty < 0 || empty) {
		isl_map_free(next_element);
		isl_map_free(access_map);
		return empty < 0 ? -1 : 1;
	}

	space = isl_map_get_space(access_map);
	space = isl_space_domain(space);
	next_thread_x = next(space, data->thread_depth + data->n_thread - 1);

	map = isl_map_apply_domain(next_thread_x, isl_map_copy(access_map));
	map = isl_map_apply_range(map, access_map);

	coalesced = isl_map_is_subset(map, next_element);

	isl_map_free(next_element);
	isl_map_free(map);

	return coalesced;
}
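
/* Illustrative example (hypothetical mapping, for exposition only):
 * if the dimension that gets wrapped over the last thread index is t
 * and the scheduled access reads A[i][t], then incrementing t also
 * increments the last array index, so the access is coalesced.
 * If the access reads A[t][i] instead, incrementing t jumps by a full
 * row of A and the access is not coalesced.
 */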
/* Replace the host schedule dimensions in the access relation "access"
 * by parameters, so that they are treated as fixed when checking for reuse
 * (within a kernel) or whether two consecutive elements are accessed
 * (within a kernel).
 */
static __isl_give isl_union_map *localize_access(struct gpu_group_data *data,
	__isl_take isl_union_map *access)
{
	int n;
	isl_space *space;
	isl_set *param;
	isl_union_map *umap;
	isl_id_list *ids;

	umap = isl_union_map_copy(data->host_sched);
	space = isl_union_map_get_space(umap);
	n = data->kernel_depth;
	ids = ppcg_scop_generate_names(data->scop, n, "__ppcg_host_");
	param = parametrization(space, n, 0, ids);
	isl_id_list_free(ids);
	umap = isl_union_map_intersect_range(umap,
					isl_union_set_from_set(param));
	access = isl_union_map_intersect_domain(access,
					isl_union_map_domain(umap));

	return access;
}
/* Given an access relation in terms of at least data->thread_depth initial
 * dimensions of the computed schedule, check if it is bijective for
 * fixed values of the first data->thread_depth dimensions.
 * We perform this check by equating these dimensions to parameters.
 */
static int access_is_bijective(struct gpu_group_data *data,
	__isl_keep isl_map *access)
{
	int res;
	int dim;
	isl_set *par;
	isl_space *space;
	isl_id_list *ids;

	access = isl_map_copy(access);
	space = isl_space_params(isl_map_get_space(access));
	ids = ppcg_scop_generate_names(data->scop, data->thread_depth, "s");
	dim = isl_map_dim(access, isl_dim_in);
	par = parametrization(space, dim, 0, ids);
	isl_id_list_free(ids);
	access = isl_map_intersect_domain(access, par);
	res = isl_map_is_bijective(access);
	isl_map_free(access);

	return res;
}
/* Compute the number of outer schedule tile dimensions that affect
 * the offset of "tile".
 * If there is no such dimension, then return the index
 * of the first kernel dimension, i.e., data->kernel_depth.
 */
static int compute_tile_depth(struct gpu_group_data *data,
	struct gpu_array_tile *tile)
{
	int i, j;

	for (j = tile->depth - 1; j >= data->kernel_depth; --j) {
		for (i = 0; i < tile->n; ++i) {
			isl_aff *lb;
			isl_aff *shift;

			lb = tile->bound[i].lb;
			if (isl_aff_involves_dims(lb, isl_dim_in, j, 1))
				break;

			shift = tile->bound[i].shift;
			if (!shift)
				continue;
			if (isl_aff_involves_dims(shift, isl_dim_in, j, 1))
				break;
		}
		if (i < tile->n)
			break;
	}

	return ++j;
}
/* Return the lowest depth between data->kernel_depth and data->thread_depth
 * at which every array element accessed through "acc" is accessed
 * by a single thread.  The input dimension of "acc" is
 * data->thread_depth + data->n_thread, where the final data->n_thread
 * dimensions are those that will be mapped to threads.
 * If the values for these dimensions are uniquely determined
 * by the array index and a given number of outer dimensions, then
 * there is only one thread accessing that array element within those
 * outer dimensions.
 *
 * The input space of "acc" is first split up, such that it has the form
 *
 *	[O -> T] -> A
 *
 * with O the outer dimensions, T the dimensions that will be mapped to threads
 * and A the array index.
 *
 * Then the positions of T and A are interchanged to simplify the test
 * whether T uniquely depends on O and A.
 * In particular, the above access relation is first combined with
 *
 *	[O -> T] -> T
 *
 * to form
 *
 *	[O -> T] -> [A -> T]
 *
 * from which
 *
 *	O -> [A -> T]
 *
 * is extracted, which is then uncurried to
 *
 *	[O -> A] -> T
 *
 * Finally, the final dimensions of O are projected out one by one
 * until T is no longer uniquely determined by A and the remaining
 * dimensions in O.  The value returned is that of the last dimension
 * that was successfully projected out.
 * Note that there is no need to test whether [O -> A] -> T itself
 * is single-valued as that was already tested in access_is_bijective.
 */
static int compute_accessed_by_single_thread_depth(struct gpu_group_data *data,
	__isl_keep isl_map *acc)
{
	int i;
	int sv;
	isl_space *space;
	isl_map *map;

	if (data->thread_depth == data->kernel_depth)
		return data->thread_depth;

	acc = isl_map_copy(acc);

	space = isl_map_get_space(acc);
	space = isl_space_params(space);
	space = isl_space_set_from_params(space);
	space = isl_space_add_dims(space, isl_dim_set, data->thread_depth);
	space = isl_space_from_domain(space);
	space = isl_space_add_dims(space, isl_dim_out, data->n_thread);
	space = isl_space_wrap(space);
	map = isl_set_flatten_map(isl_set_universe(space));
	acc = isl_map_apply_range(map, acc);

	space = isl_space_domain(isl_map_get_space(acc));
	map = isl_map_range_map(isl_map_universe(isl_space_unwrap(space)));
	acc = isl_map_range_product(acc, map);
	acc = isl_map_domain_factor_domain(acc);
	acc = isl_map_uncurry(acc);

	for (i = data->thread_depth - 1; i >= data->kernel_depth; --i) {
		acc = isl_map_project_out(acc, isl_dim_in, i, 1);
		sv = isl_map_is_single_valued(acc);
		if (sv < 0) {
			isl_map_free(acc);
			return -1;
		}
		if (!sv)
			break;
	}

	isl_map_free(acc);

	return i + 1;
}
/* Adjust the fields of "tile" to reflect the new input dimension "depth".
 * The dimensions beyond "depth" are assumed not to affect the tile,
 * so they can simply be dropped.
 */
static int tile_adjust_depth(struct gpu_array_tile *tile, int depth)
{
	int i;

	if (tile->depth == depth)
		return 0;

	for (i = 0; i < tile->n; ++i) {
		tile->bound[i].lb = isl_aff_drop_dims(tile->bound[i].lb,
					isl_dim_in, depth, tile->depth - depth);
		if (!tile->bound[i].lb)
			return -1;
		if (!tile->bound[i].shift)
			continue;
		tile->bound[i].shift = isl_aff_drop_dims(tile->bound[i].shift,
					isl_dim_in, depth, tile->depth - depth);
		if (!tile->bound[i].shift)
			return -1;
	}

	tile->depth = depth;

	return 0;
}
/* Determine the number of schedule dimensions that affect the offset of the
 * shared or private tile "tile" and store the result in tile->depth, with
 * a lower bound of data->kernel_depth.
 * Also adjust the fields of the tile to only refer to the tile->depth
 * outer schedule dimensions.
 */
static isl_stat tile_set_depth(struct gpu_group_data *data,
	struct gpu_array_tile *tile)
{
	if (tile_adjust_depth(tile, compute_tile_depth(data, tile)) < 0)
		return isl_stat_error;

	return isl_stat_ok;
}
/* Determine the number of schedule dimensions that affect the offset of the
 * shared tile and store the minimum of the private and shared tile depth
 * in group->min_depth, with a lower bound of data->kernel_depth.
 * If there is no tile defined on the array reference group,
 * then set group->min_depth to data->thread_depth.
 */
static int set_depth(struct gpu_group_data *data,
	struct gpu_array_ref_group *group)
{
	group->min_depth = data->thread_depth;

	if (group->private_tile) {
		if (group->private_tile->depth < group->min_depth)
			group->min_depth = group->private_tile->depth;
	}
	if (group->shared_tile) {
		if (tile_set_depth(data, group->shared_tile) < 0)
			return -1;
		if (group->shared_tile->depth < group->min_depth)
			group->min_depth = group->shared_tile->depth;
	}

	return 0;
}
/* Fill up the groups array with singleton groups, i.e., one group
 * per reference, initializing the array, access, write, n_ref and refs fields.
 * In particular the access field is initialized to the scheduled
 * access relation of the array reference.
 *
 * Return the number of elements initialized, i.e., the number of
 * active references in the current kernel.
 */
static int populate_array_references(struct gpu_local_array_info *local,
	struct gpu_array_ref_group **groups, struct gpu_group_data *data)
{
	int i;
	int n;
	isl_ctx *ctx = isl_union_map_get_ctx(data->copy_sched);

	n = 0;
	for (i = 0; i < local->array->n_ref; ++i) {
		isl_union_map *umap;
		isl_map *map;
		struct gpu_array_ref_group *group;
		struct gpu_stmt_access *access = local->array->refs[i];

		map = isl_map_copy(access->access);
		umap = isl_union_map_from_map(map);
		umap = isl_union_map_apply_domain(umap,
				isl_union_map_copy(data->copy_sched));

		if (isl_union_map_is_empty(umap)) {
			isl_union_map_free(umap);
			continue;
		}

		map = isl_map_from_union_map(umap);
		map = isl_map_detect_equalities(map);

		group = isl_calloc_type(ctx, struct gpu_array_ref_group);
		if (!group) {
			isl_map_free(map);
			return -1;
		}
		group->local_array = local;
		group->array = local->array;
		group->access = map;
		group->write = access->write;
		group->exact_write = access->exact_write;
		group->slice = access->n_index < local->array->n_index;
		group->refs = &local->array->refs[i];
		group->n_ref = 1;

		groups[n++] = group;
	}

	return n;
}
/* If group->n_ref == 1, then group->refs was set by
 * populate_array_references to point directly into
 * group->array->refs and should not be freed.
 * If group->n_ref > 1, then group->refs was set by join_groups
 * to point to a newly allocated array.
 */
struct gpu_array_ref_group *gpu_array_ref_group_free(
	struct gpu_array_ref_group *group)
{
	if (!group)
		return NULL;
	gpu_array_tile_free(group->shared_tile);
	gpu_array_tile_free(group->private_tile);
	isl_map_free(group->access);
	if (group->n_ref > 1)
		free(group->refs);
	free(group);
	return NULL;
}
/* Check if the access relations of group1 and group2 overlap within
 * the innermost loop.
 */
static int accesses_overlap(struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	int disjoint;

	disjoint = isl_map_is_disjoint(group1->access, group2->access);
	if (disjoint < 0)
		return -1;

	return !disjoint;
}
/* Combine the given two groups into a single group, containing
 * the references of both groups.
 */
static struct gpu_array_ref_group *join_groups(
	struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	int i;
	isl_ctx *ctx;
	struct gpu_array_ref_group *group;

	if (!group1 || !group2)
		return NULL;

	ctx = isl_map_get_ctx(group1->access);
	group = isl_calloc_type(ctx, struct gpu_array_ref_group);
	if (!group)
		return NULL;
	group->local_array = group1->local_array;
	group->array = group1->array;
	group->access = isl_map_union(isl_map_copy(group1->access),
					isl_map_copy(group2->access));
	group->write = group1->write || group2->write;
	group->exact_write = group1->exact_write && group2->exact_write;
	group->slice = group1->slice || group2->slice;
	group->n_ref = group1->n_ref + group2->n_ref;
	group->refs = isl_alloc_array(ctx, struct gpu_stmt_access *,
					group->n_ref);
	if (!group->refs)
		return gpu_array_ref_group_free(group);
	for (i = 0; i < group1->n_ref; ++i)
		group->refs[i] = group1->refs[i];
	for (i = 0; i < group2->n_ref; ++i)
		group->refs[group1->n_ref + i] = group2->refs[i];

	return group;
}
/* Combine the given two groups into a single group and free
 * the original two groups.
 */
static struct gpu_array_ref_group *join_groups_and_free(
	struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	struct gpu_array_ref_group *group;

	group = join_groups(group1, group2);
	gpu_array_ref_group_free(group1);
	gpu_array_ref_group_free(group2);
	return group;
}
/* Report that the array reference group with the given access relation
 * is not mapped to shared memory in the given kernel because
 * it does not exhibit any reuse and is considered to be coalesced.
 */
static void report_no_reuse_and_coalesced(struct ppcg_kernel *kernel,
	__isl_keep isl_union_map *access)
{
	isl_ctx *ctx;
	isl_printer *p;

	ctx = isl_union_map_get_ctx(access);
	p = isl_printer_to_file(ctx, stdout);
	p = isl_printer_print_str(p, "Array reference group ");
	p = isl_printer_print_union_map(p, access);
	p = isl_printer_print_str(p,
	    " not considered for mapping to shared memory in kernel");
	p = isl_printer_print_int(p, kernel->id);
	p = isl_printer_print_str(p,
	    " because it exhibits no reuse and is considered to be coalesced");
	p = isl_printer_end_line(p);
	isl_printer_free(p);
}
/* Given an access relation in terms of the data->thread_depth initial
 * dimensions of the computed schedule and the thread identifiers
 * (as parameters), check if the use of the corresponding private tile
 * requires unrolling.
 *
 * If we are creating a private tile because we are forced to,
 * then no unrolling is required.
 * Otherwise we check if "access" is bijective and unrolling
 * is required if it is not.  Note that the access relation
 * has already been determined to be bijective before the introduction
 * of the thread identifiers and the removal of the schedule dimensions
 * that are mapped to these threads.  If the access relation is no longer
 * bijective, then this means that more than one value of one of those
 * schedule dimensions is mapped to the same thread and therefore
 * unrolling is required.
 */
static int check_requires_unroll(struct gpu_group_data *data,
	__isl_keep isl_map *access, int force_private)
{
	int bijective;

	if (force_private)
		return 0;
	bijective = access_is_bijective(data, access);
	if (bijective < 0)
		return -1;
	return !bijective;
}
/* Map the domain of "access" to the outer data->shared_depth
 * schedule dimensions.  When data->shared_depth is equal to
 * data->thread_depth, this result is already available in group->access.
 */
static __isl_give isl_map *shared_access(struct gpu_array_ref_group *group,
	__isl_keep isl_union_map *access, struct gpu_group_data *data)
{
	isl_union_map *shared;

	if (data->shared_depth == data->thread_depth)
		return isl_map_copy(group->access);

	shared = isl_union_map_copy(access);
	shared = isl_union_map_apply_domain(shared,
			isl_union_map_copy(data->shared_sched));
	return isl_map_from_union_map(shared);
}
/* Compute the private and/or shared memory tiles for the array
 * reference group "group" of array "array".
 * Return 0 on success and -1 on error.
 *
 * If the array is a read-only scalar or if the user requested
 * not to use shared or private memory, then we do not need to do anything.
 *
 * If any reference in the reference group accesses more than one element,
 * then we would have to make sure that the layout in shared memory
 * is the same as that in global memory.  Since we do not handle this yet
 * (and it may not even be possible), we refuse to map to private or
 * shared memory in such cases.
 *
 * If the array group involves any may writes (that are not must writes),
 * then we would have to make sure that we load the data into shared/private
 * memory first in case the data is not written by the kernel
 * (but still written back out to global memory).
 * Since we don't have any such mechanism at the moment, we don't
 * compute shared/private tiles for groups involving may writes.
 *
 * We only try to compute a shared memory tile if there is any reuse
 * or if the access is not coalesced.
 * Reuse and coalescing are checked within the given kernel.
 *
 * For computing a private memory tile, we also require that there is
 * some reuse.  Moreover, we require that the access is private
 * to the thread.  That is, we check that any given array element
 * is only accessed by a single thread.
 * We compute an access relation that maps the outer
 * data->thread_depth + data->n_thread schedule dimensions.
 * The latter data->n_thread will be mapped to thread identifiers.
 * We actually check that those iterators that will be wrapped
 * partition the array space.  This check is stricter than necessary
 * since several iterations may be mapped onto the same thread
 * and then they could be allowed to access the same memory elements,
 * but our check does not allow this situation.
 *
 * For private memory tiles, the number of schedule dimensions that
 * affect the offset is computed and stored in tile->depth, with
 * a lower bound of data->kernel_depth.  If this depth is smaller
 * than the minimal depth that still ensures that every element
 * is accessed by a single thread, then the depth is raised
 * to this minimal depth.
 * The fields of the tile are then adjusted to only refer to the tile->depth
 * outer schedule dimensions.
 *
 * We also check that the index expression only depends on parallel
 * loops.  That way, we can move those loops innermost and unroll them.
 * Again, we use a test that is stricter than necessary.
 * We actually check whether the index expression only depends
 * on the iterators that are wrapped over the threads.
 * These are necessarily parallel, but there may be more parallel loops.
 *
 * Combining the injectivity of the first test with the single-valuedness
 * of the second test, we simply test for bijectivity.
 *
 * If the use of the private tile requires unrolling, but some
 * of the other arrays are forcibly mapped to private memory,
 * then we do not allow the use of this private tile since
 * we cannot move the schedule dimensions that need to be unrolled down
 * without performing some kind of expansion on those arrays
 * that are forcibly mapped to private memory.
 *
 * If the array is marked force_private, then we bypass all checks
 * and assume we can (and should) use registers.
 *
 * If it turns out we can (or have to) use registers, we compute
 * the private memory tile size using can_tile, after introducing a dependence
 * on the thread indices.
 */
static int compute_group_bounds_core(struct ppcg_kernel *kernel,
	struct gpu_array_ref_group *group, struct gpu_group_data *data)
{
	isl_ctx *ctx = isl_space_get_ctx(group->array->space);
	isl_union_map *access, *local;
	int n_index = group->array->n_index;
	int no_reuse, coalesced;
	isl_map *acc;
	int force_private = group->local_array->force_private;
	int use_shared = kernel->options->use_shared_memory &&
				data->n_thread > 0;
	int use_private = force_private || kernel->options->use_private_memory;
	int r = 0;
	int requires_unroll;
	int unique_depth;

	if (!use_shared && !use_private)
		return 0;
	if (gpu_array_is_read_only_scalar(group->array))
		return 0;
	if (!force_private && !group->exact_write)
		return 0;
	if (group->slice)
		return 0;

	access = gpu_array_ref_group_access_relation(group, 1, 1);
	local = localize_access(data, isl_union_map_copy(access));
	no_reuse = isl_union_map_is_injective(local);
	if (no_reuse < 0)
		r = -1;
	if (use_shared && no_reuse)
		coalesced = access_is_coalesced(data, local);
	isl_union_map_free(local);

	if (r >= 0 && kernel->options->debug->verbose &&
	    use_shared && no_reuse && coalesced)
		report_no_reuse_and_coalesced(kernel, access);

	if (use_shared && (!no_reuse || !coalesced)) {
		group->shared_tile = gpu_array_tile_create(ctx,
							group->array->n_index);
		acc = shared_access(group, access, data);
		if (!group->shared_tile)
			r = -1;
		else if (!can_tile(acc, group->shared_tile))
			group->shared_tile =
				gpu_array_tile_free(group->shared_tile);
		isl_map_free(acc);
	}

	if (r < 0 || (!force_private && (!use_private || no_reuse))) {
		isl_union_map_free(access);
		return r;
	}

	access = isl_union_map_apply_domain(access,
					isl_union_map_copy(data->thread_sched));

	acc = isl_map_from_union_map(access);

	if (!force_private && !access_is_bijective(data, acc)) {
		isl_map_free(acc);
		return 0;
	}

	unique_depth = compute_accessed_by_single_thread_depth(data, acc);

	acc = isl_map_intersect_domain(acc, isl_set_copy(data->privatization));
	acc = isl_map_project_out(acc, isl_dim_in, data->thread_depth,
							data->n_thread);
	requires_unroll = check_requires_unroll(data, acc, force_private);
	if (unique_depth < 0 || requires_unroll < 0 ||
	    (requires_unroll && kernel->any_force_private)) {
		isl_map_free(acc);
		return requires_unroll < 0 ? -1 : 0;
	}

	group->private_tile = gpu_array_tile_create(ctx, n_index);
	if (!group->private_tile) {
		isl_map_free(acc);
		return -1;
	}
	group->private_tile->requires_unroll = requires_unroll;
	if (!can_tile(acc, group->private_tile))
		group->private_tile = gpu_array_tile_free(group->private_tile);

	isl_map_free(acc);

	if (group->private_tile) {
		struct gpu_array_tile *tile = group->private_tile;
		int tile_depth = compute_tile_depth(data, tile);
		if (tile_depth < unique_depth)
			tile_depth = unique_depth;
		if (tile_adjust_depth(tile, tile_depth) < 0)
			return -1;
	}

	if (force_private && !group->private_tile)
		isl_die(ctx, isl_error_internal,
			"unable to map array reference group to registers",
			return -1);

	return 0;
}
/* Compute the private and/or shared memory tiles for the array
 * reference group "group" of array "array" and set the tile depth.
 * Return 0 on success and -1 on error.
 */
static int compute_group_bounds(struct ppcg_kernel *kernel,
	struct gpu_array_ref_group *group, struct gpu_group_data *data)
{
	if (!group)
		return -1;
	if (compute_group_bounds_core(kernel, group, data) < 0)
		return -1;
	if (set_depth(data, group) < 0)
		return -1;

	return 0;
}
/* If two groups have overlapping access relations (as determined by
 * the "overlap" function) and if one of them involves a write,
 * then merge the two groups into one.
 * If "compute_bounds" is set, then call compute_group_bounds
 * on the merged groups.
 *
 * Return the updated number of groups.
 * Return -1 on error.
 */
static int group_writes(struct ppcg_kernel *kernel,
	int n, struct gpu_array_ref_group **groups,
	int (*overlap)(struct gpu_array_ref_group *group1,
		struct gpu_array_ref_group *group2), int compute_bounds,
	struct gpu_group_data *data)
{
	int i, j;

	for (i = 0; i < n; ++i) {
		for (j = n - 1; j > i; --j) {
			if (!groups[i]->write && !groups[j]->write)
				continue;

			if (!overlap(groups[i], groups[j]))
				continue;

			groups[i] = join_groups_and_free(groups[i], groups[j]);
			if (j != n - 1)
				groups[j] = groups[n - 1];
			groups[n - 1] = NULL;
			n--;

			if (!groups[i])
				return -1;
			if (compute_bounds &&
			    compute_group_bounds(kernel, groups[i], data) < 0)
				return -1;
		}
	}

	return n;
}
/* If two groups have overlapping access relations (within the innermost
 * loop) and if one of them involves a write, then merge the two groups
 * into one.
 *
 * Return the updated number of groups.
 */
static int group_overlapping_writes(struct ppcg_kernel *kernel,
	int n, struct gpu_array_ref_group **groups,
	struct gpu_group_data *data)
{
	return group_writes(kernel, n, groups, &accesses_overlap, 0, data);
}
/* Check if the access relations of group1 and group2 overlap within
 * the outermost min(group1->min_depth, group2->min_depth) loops.
 */
static int depth_accesses_overlap(struct gpu_array_ref_group *group1,
	struct gpu_array_ref_group *group2)
{
	int depth;
	int dim;
	int empty;
	isl_map *map_i, *map_j, *map;

	depth = group1->min_depth;
	if (group2->min_depth < depth)
		depth = group2->min_depth;
	map_i = isl_map_copy(group1->access);
	dim = isl_map_dim(map_i, isl_dim_in);
	map_i = isl_map_eliminate(map_i, isl_dim_in, depth, dim - depth);
	map_j = isl_map_copy(group2->access);
	map_j = isl_map_eliminate(map_j, isl_dim_in, depth, dim - depth);
	map = isl_map_intersect(map_i, map_j);
	empty = isl_map_is_empty(map);
	isl_map_free(map);

	return !empty;
}
/* If two groups have overlapping access relations (within the outer
 * depth loops) and if one of them involves a write,
 * then merge the two groups into one.
 *
 * Return the updated number of groups.
 */
static int group_depth_overlapping_writes(struct ppcg_kernel *kernel,
	int n, struct gpu_array_ref_group **groups, struct gpu_group_data *data)
{
	return group_writes(kernel, n, groups, &depth_accesses_overlap, 1,
				data);
}
/* Is the size of the tile specified by "tile" smaller than the sum of
 * the sizes of the tiles specified by "tile1" and "tile2"?
 */
static int smaller_tile(struct gpu_array_tile *tile,
	struct gpu_array_tile *tile1, struct gpu_array_tile *tile2)
{
	int smaller;
	isl_val *size, *size1, *size2;

	size = gpu_array_tile_size(tile);
	size1 = gpu_array_tile_size(tile1);
	size2 = gpu_array_tile_size(tile2);

	size = isl_val_sub(size, size1);
	size = isl_val_sub(size, size2);
	smaller = isl_val_is_neg(size);

	isl_val_free(size);

	return smaller;
}
/* Given an initial grouping of array references and shared memory tiles
 * for each group that allows for a shared memory tile, merge two groups
 * if both have a shared memory tile, the merged group also has
 * a shared memory tile and the size of the tile for the merged group
 * is smaller than the sum of the tile sizes of the individual groups.
 *
 * If merging two groups decreases the depth of the tile of
 * one or both of the two groups, then we need to check for overlapping
 * writes again.
 *
 * Return the number of groups after merging.
 * Return -1 on error.
 */
static int group_common_shared_memory_tile(struct ppcg_kernel *kernel,
	struct gpu_array_info *array, int n,
	struct gpu_array_ref_group **groups, struct gpu_group_data *data)
{
	int i, j;
	int recompute_overlap = 0;

	for (i = 0; i < n; ++i) {
		if (!groups[i]->shared_tile)
			continue;
		for (j = n - 1; j > i; --j) {
			struct gpu_array_ref_group *group;

			if (!groups[j]->shared_tile)
				continue;

			if (!depth_accesses_overlap(groups[i], groups[j]))
				continue;

			group = join_groups(groups[i], groups[j]);
			if (compute_group_bounds(kernel, group, data) < 0) {
				gpu_array_ref_group_free(group);
				return -1;
			}
			if (!group->shared_tile ||
			    !smaller_tile(group->shared_tile,
					groups[i]->shared_tile,
					groups[j]->shared_tile)) {
				gpu_array_ref_group_free(group);
				continue;
			}

			if (group->min_depth < groups[i]->min_depth ||
			    group->min_depth < groups[j]->min_depth)
				recompute_overlap = 1;
			gpu_array_ref_group_free(groups[i]);
			gpu_array_ref_group_free(groups[j]);
			groups[i] = group;
			if (j != n - 1)
				groups[j] = groups[n - 1];
			n--;
		}
	}

	if (recompute_overlap)
		n = group_depth_overlapping_writes(kernel, n, groups, data);
	return n;
}
/* Set array->n_group and array->groups to n and groups.
 *
 * Additionally, set the "nr" field of each group.
 */
static void set_array_groups(struct gpu_local_array_info *array,
	int n, struct gpu_array_ref_group **groups)
{
	int i;

	array->n_group = n;
	array->groups = groups;

	for (i = 0; i < n; ++i)
		groups[i]->nr = i;
}
/* Combine all groups in "groups" into a single group and return
 * the new number of groups (1 or 0 if there were no groups to start with).
 */
static int join_all_groups(int n, struct gpu_array_ref_group **groups)
{
	int i;

	for (i = n - 1; i > 0; --i) {
		groups[0] = join_groups_and_free(groups[0], groups[i]);
		groups[i] = NULL;
		n--;
	}

	return n;
}
/* Group array references that should be considered together when
 * deciding whether to access them from private, shared or global memory.
 * Return -1 on error.
 *
 * In particular, if two array references overlap and if one of them
 * is a write, then the two references are grouped together.
 * We first perform an initial grouping based only on the access relation.
 * After computing shared and private memory tiles, we check for
 * overlapping writes again, but this time taking into account
 * the depth of the effective tile.
 *
 * Furthermore, if two groups admit a shared memory tile and if the
 * combination of the two also admits a shared memory tile, we merge
 * the two groups.
 *
 * If the array contains structures, then we compute a single
 * reference group without trying to find any tiles
 * since we do not map such arrays to private or shared
 * memory.  The only exception is when those arrays of structures
 * are required to be mapped to private memory.
 */
static int group_array_references(struct ppcg_kernel *kernel,
	struct gpu_local_array_info *local, struct gpu_group_data *data)
{
	int i;
	int n;
	isl_ctx *ctx = isl_union_map_get_ctx(data->shared_sched);
	struct gpu_array_ref_group **groups;

	groups = isl_calloc_array(ctx, struct gpu_array_ref_group *,
					local->array->n_ref);
	if (!groups)
		return -1;

	n = populate_array_references(local, groups, data);

	if (local->array->has_compound_element && !local->force_private) {
		n = join_all_groups(n, groups);
		set_array_groups(local, n, groups);
		return 0;
	}

	n = group_overlapping_writes(kernel, n, groups, data);

	for (i = 0; i < n; ++i)
		if (compute_group_bounds(kernel, groups[i], data) < 0)
			n = -1;

	n = group_depth_overlapping_writes(kernel, n, groups, data);

	n = group_common_shared_memory_tile(kernel, local->array,
					n, groups, data);

	set_array_groups(local, n, groups);

	if (n >= 0)
		return 0;

	for (i = 0; i < local->array->n_ref; ++i)
		gpu_array_ref_group_free(groups[i]);
	return -1;
}
/* For each array in the input program that can be mapped to private memory,
 * check if there are any order dependences active inside the current kernel,
 * within the same iteration of the host schedule, i.e., the prefix
 * schedule at "node".
 * If so, mark the array as force_private so that its reference groups will be
 * mapped to registers.
 *
 * Note that the arrays that cannot be mapped to private memory have
 * had their order dependences added to prog->array_order and
 * subsequently to the coincidence constraints.
 */
static void check_can_be_private_live_ranges(struct ppcg_kernel *kernel,
	__isl_keep isl_schedule_node *node)
{
	int i;
	isl_union_map *sched;
	isl_union_set *domain;
	isl_multi_union_pw_aff *prefix;
	isl_union_pw_multi_aff *contraction;

	if (!kernel->options->live_range_reordering)
		return;

	kernel->any_force_private = 0;

	prefix = isl_schedule_node_get_prefix_schedule_multi_union_pw_aff(node);
	contraction = isl_union_pw_multi_aff_copy(kernel->contraction);
	prefix = isl_multi_union_pw_aff_pullback_union_pw_multi_aff(prefix,
			contraction);
	domain = isl_union_set_copy(kernel->expanded_domain);
	domain = isl_union_set_universe(domain);

	for (i = 0; i < kernel->n_array; ++i) {
		struct gpu_local_array_info *local = &kernel->array[i];
		isl_union_map *order;

		local->force_private = 0;
		if (!gpu_array_can_be_private(local->array))
			continue;
		order = isl_union_map_copy(local->array->dep_order);
		order = isl_union_map_intersect_domain(order,
						isl_union_set_copy(domain));
		order = isl_union_map_intersect_range(order,
						isl_union_set_copy(domain));
		order = isl_union_map_eq_at_multi_union_pw_aff(order,
					isl_multi_union_pw_aff_copy(prefix));
		if (!isl_union_map_is_empty(order)) {
			local->force_private = 1;
			kernel->any_force_private = 1;
		}
		isl_union_map_free(order);
	}

	isl_multi_union_pw_aff_free(prefix);
	isl_union_set_free(domain);
}
/* Expand the domain of the schedule "s" by plugging in
 * the contraction "contraction" and return the result.
 */
static __isl_give isl_union_map *expand(__isl_take isl_union_map *s,
	__isl_keep isl_union_pw_multi_aff *contraction)
{
	contraction = isl_union_pw_multi_aff_copy(contraction);
	s = isl_union_map_preimage_domain_union_pw_multi_aff(s, contraction);

	return s;
}
/* Create a set of dimension data->thread_depth + data->n_thread
 * that equates the residue of the final data->n_thread dimensions
 * modulo the kernel->block_dim sizes to the thread identifiers.
 * Store the computed set in data->privatization.
 *
 * The construction starts with the space of kernel->thread_filter,
 * which is known to reference all thread identifiers.
 */
static void compute_privatization(struct gpu_group_data *data,
	struct ppcg_kernel *kernel)
{
	int i;
	isl_ctx *ctx;
	isl_space *space;
	isl_local_space *ls;
	isl_set *set;

	ctx = isl_union_map_get_ctx(data->shared_sched);
	space = isl_union_set_get_space(kernel->thread_filter);
	space = isl_space_set_from_params(space);
	space = isl_space_add_dims(space, isl_dim_set,
				data->thread_depth + data->n_thread);
	set = isl_set_universe(space);
	space = isl_set_get_space(set);
	ls = isl_local_space_from_space(space);

	for (i = 0; i < data->n_thread; ++i) {
		isl_aff *aff, *aff2;
		isl_constraint *c;
		isl_val *v;
		isl_id *id;
		int pos;

		aff = isl_aff_var_on_domain(isl_local_space_copy(ls),
					isl_dim_set, data->thread_depth + i);
		v = isl_val_int_from_si(ctx, kernel->block_dim[i]);
		aff = isl_aff_mod_val(aff, v);
		id = isl_id_list_get_id(kernel->thread_ids, i);
		pos = isl_set_find_dim_by_id(set, isl_dim_param, id);
		isl_id_free(id);
		aff2 = isl_aff_var_on_domain(isl_local_space_copy(ls),
					isl_dim_param, pos);
		aff = isl_aff_sub(aff, aff2);
		c = isl_equality_from_aff(aff);
		set = isl_set_add_constraint(set, c);
	}

	isl_local_space_free(ls);
	data->privatization = set;
}
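
/* Illustrative example (hypothetical sizes, for exposition only):
 * for data->thread_depth = 2, data->n_thread = 2,
 * kernel->block_dim = { 16, 8 } and thread identifier parameters
 * named, say, t0 and t1, the computed set is
 *
 *	[t0, t1] -> { [s0, s1, x0, x1] : x0 mod 16 = t0 and x1 mod 8 = t1 }
 */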
/* Return the prefix schedule at "node" as a relation
 * between domain elements and schedule dimensions after detecting
 * equalities in this relation.
 */
static __isl_give isl_union_map *prefix_with_equalities(
	__isl_keep isl_schedule_node *node)
{
	isl_union_map *schedule;

	schedule = isl_schedule_node_get_prefix_schedule_relation(node);
	schedule = isl_union_map_detect_equalities(schedule);

	return schedule;
}
/* Group references of all arrays in "kernel".
 * "node" points to the kernel mark.
 * The mapping to shared memory is computed at the "shared" mark.
 *
 * We first extract all required schedule information into
 * a gpu_group_data structure and then consider each array
 * in turn.
 */
int gpu_group_references(struct ppcg_kernel *kernel,
	__isl_keep isl_schedule_node *node)
{
	int i;
	int r = 0;
	isl_union_pw_multi_aff *contraction;
	struct gpu_group_data data;

	check_can_be_private_live_ranges(kernel, node);

	data.scop = kernel->prog->scop;

	data.kernel_depth = isl_schedule_node_get_schedule_depth(node);
	data.host_sched = isl_schedule_node_get_prefix_schedule_relation(node);

	node = isl_schedule_node_copy(node);
	node = gpu_tree_move_down_to_shared(node, kernel->core);
	data.shared_depth = isl_schedule_node_get_schedule_depth(node);
	data.shared_sched = prefix_with_equalities(node);

	node = gpu_tree_move_down_to_thread(node, kernel->core);
	node = isl_schedule_node_child(node, 0);
	data.thread_depth = isl_schedule_node_get_schedule_depth(node);
	data.n_thread = isl_schedule_node_band_n_member(node);
	if (data.thread_depth == data.shared_depth)
		data.copy_sched = isl_union_map_copy(data.shared_sched);
	else
		data.copy_sched = prefix_with_equalities(node);
	data.thread_sched = isl_union_map_copy(data.copy_sched);
	data.thread_sched = isl_union_map_flat_range_product(data.thread_sched,
		isl_schedule_node_band_get_partial_schedule_union_map(node));
	data.thread_sched = isl_union_map_detect_equalities(data.thread_sched);

	contraction = isl_union_pw_multi_aff_copy(kernel->contraction);
	data.host_sched = expand(data.host_sched, contraction);
	data.shared_sched = expand(data.shared_sched, contraction);
	if (data.thread_depth == data.shared_depth) {
		isl_union_map_free(data.copy_sched);
		data.copy_sched = isl_union_map_copy(data.shared_sched);
	} else
		data.copy_sched = expand(data.copy_sched, contraction);
	data.thread_sched = expand(data.thread_sched, contraction);
	isl_union_pw_multi_aff_free(contraction);

	node = isl_schedule_node_child(node, 0);
	data.full_sched = isl_union_map_copy(data.thread_sched);
	data.full_sched = isl_union_map_flat_range_product(data.full_sched,
		isl_schedule_node_get_subtree_schedule_union_map(node));
	isl_schedule_node_free(node);

	compute_privatization(&data, kernel);

	for (i = 0; i < kernel->n_array; ++i) {
		r = group_array_references(kernel, &kernel->array[i], &data);
		if (r < 0)
			break;
	}

	isl_union_map_free(data.host_sched);
	isl_union_map_free(data.shared_sched);
	isl_union_map_free(data.copy_sched);
	isl_union_map_free(data.thread_sched);
	isl_union_map_free(data.full_sched);
	isl_set_free(data.privatization);

	return r;
}
/* Given a description of an array tile "tile" and the "space"
 *
 *	{ D -> A }
 *
 * where D represents the first tile->depth schedule dimensions
 * and A represents the array, construct an isl_multi_aff
 *
 *	{ [D[i] -> A[a]] -> A'[a'] }
 *
 * with A' a scaled down copy of A according to the shifts and strides
 * in "tile".  In particular,
 *
 *	a' = (a + shift(i))/stride
 *
 * "insert_array" represents
 *
 *	{ [D -> A] -> D }
 *
 * and is used to insert A into the domain of functions that only
 * reference D.
 */
static __isl_give isl_multi_aff *strided_tile(
	struct gpu_array_tile *tile, __isl_keep isl_space *space,
	__isl_keep isl_multi_aff *insert_array)
{
	int i;
	isl_ctx *ctx;
	isl_multi_aff *shift;
	isl_multi_val *stride;
	isl_space *space2;
	isl_local_space *ls;
	isl_multi_aff *tiling;

	ctx = isl_space_get_ctx(space);
	space2 = isl_space_domain(isl_space_copy(space));
	ls = isl_local_space_from_space(space2);
	space2 = isl_space_range(isl_space_copy(space));
	stride = isl_multi_val_zero(space2);
	shift = isl_multi_aff_zero(isl_space_copy(space));

	for (i = 0; i < tile->n; ++i) {
		struct gpu_array_bound *bound = &tile->bound[i];
		isl_val *stride_i;
		isl_aff *shift_i;

		if (tile->bound[i].shift) {
			stride_i = isl_val_copy(bound->stride);
			shift_i = isl_aff_copy(bound->shift);
		} else {
			stride_i = isl_val_one(ctx);
			shift_i = isl_aff_zero_on_domain(
					isl_local_space_copy(ls));
		}

		stride = isl_multi_val_set_val(stride, i, stride_i);
		shift = isl_multi_aff_set_aff(shift, i, shift_i);
	}
	isl_local_space_free(ls);

	shift = isl_multi_aff_pullback_multi_aff(shift,
					isl_multi_aff_copy(insert_array));

	tiling = isl_multi_aff_range_map(isl_space_copy(space));
	tiling = isl_multi_aff_add(tiling, shift);
	tiling = isl_multi_aff_scale_down_multi_val(tiling, stride);

	return tiling;
}
/* Compute a tiling for the array reference group "group".
 *
 * The tiling is of the form
 *
 *	{ [D[i] -> A[a]] -> T[t] }
 *
 * where D represents the first tile->depth schedule dimensions,
 * A represents the global array and T represents the shared or
 * private memory tile.  The name of T is the name of the local
 * array.
 *
 * If there is any stride in the accesses, then the mapping is
 *
 *	t = (a + shift(i))/stride - lb(i)
 *
 * otherwise, it is simply
 *
 *	t = a - lb(i)
 */
void gpu_array_ref_group_compute_tiling(struct gpu_array_ref_group *group)
{
	int i;
	struct gpu_array_tile *tile;
	isl_space *space;
	isl_multi_aff *tiling, *lb, *insert_array;
	isl_printer *p;
	char *local_name;

	tile = gpu_array_ref_group_tile(group);
	if (!tile)
		return;

	space = isl_map_get_space(group->access);
	space = isl_space_from_range(isl_space_range(space));
	space = isl_space_add_dims(space, isl_dim_in, tile->depth);
	insert_array = isl_multi_aff_domain_map(isl_space_copy(space));

	for (i = 0; i < tile->n; ++i)
		if (tile->bound[i].shift)
			break;

	if (i < tile->n)
		tiling = strided_tile(tile, space, insert_array);
	else
		tiling = isl_multi_aff_range_map(isl_space_copy(space));

	lb = isl_multi_aff_zero(space);
	for (i = 0; i < tile->n; ++i) {
		isl_aff *lb_i = isl_aff_copy(tile->bound[i].lb);
		lb = isl_multi_aff_set_aff(lb, i, lb_i);
	}
	lb = isl_multi_aff_pullback_multi_aff(lb, insert_array);

	tiling = isl_multi_aff_sub(tiling, lb);

	p = isl_printer_to_str(isl_multi_aff_get_ctx(tiling));
	p = gpu_array_ref_group_print_name(group, p);
	local_name = isl_printer_get_str(p);
	isl_printer_free(p);
	tiling = isl_multi_aff_set_tuple_name(tiling, isl_dim_out, local_name);
	free(local_name);

	tile->tiling = tiling;
}
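
/* Illustrative example (hypothetical tile, for exposition only):
 * for a shared memory tile of depth 1 for an array A with
 * lb = { D[i] -> [(32 i)] } and no stride, the computed tiling is
 *
 *	{ [D[i] -> A[a]] -> shared_A[a - 32 i] }
 *
 * where "shared_A" is the name produced by
 * gpu_array_ref_group_print_name for this group.
 */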