#ifndef GPU_ARRAY_TILE_H
#define GPU_ARRAY_TILE_H

#include <isl/aff_type.h>
#include <isl/map_type.h>
#include <isl/val.h>

/* The fields stride and shift only contain valid information
 * if shift != NULL.
 * If so, they express that the current index is such that if you add shift,
 * then the result is always a multiple of stride.
 * Let D represent the initial tile->depth dimensions of the computed schedule.
 * The spaces of "lb" and "shift" are of the form
 *
 *	D -> [b]
 */
struct gpu_array_bound {
	isl_val *size;
	isl_aff *lb;

	isl_val *stride;
	isl_aff *shift;
};
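
/* As an illustrative (made-up) example: if, at a given point in the
 * schedule, a dimension of the array is only accessed at indices
 * a = 1, 5, 9, then shift = 3 and stride = 4, since a + 3 is always
 * a multiple of 4.  With lb = 1 in the strided space, the tiling map
 * documented below sends these accesses to tile positions
 * (a + 3)/4 - 1 = 0, 1, 2, so size = 3.
 */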

/* A tile of an outer array.
 *
 * requires_unroll is set if the schedule dimensions that are mapped
 * to threads need to be unrolled for this (private) tile to be used.
 *
 * "depth" reflects the number of schedule dimensions that affect the tile.
 * The copying into and/or out of the tile is performed at that depth.
 *
 * n is the dimension of the array.
 * bound is an array of size "n" representing the lower bound
 *	and size for each index.
 *
 * tiling maps a tile in the global array to the corresponding
 * shared/private memory tile and is of the form
 *
 *	{ [D[i] -> A[a]] -> T[(a + shift(i))/stride - lb(i)] }
 *
 * where D represents the initial "depth" dimensions
 * of the computed schedule.
 */
struct gpu_array_tile {
	isl_ctx *ctx;
	int requires_unroll;
	int depth;
	int n;
	struct gpu_array_bound *bound;
	isl_multi_aff *tiling;
};
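
/* A minimal sketch (hypothetical helper, not part of ppcg) of the index
 * arithmetic that "tiling" expresses for a single array dimension,
 * assuming constant stride, shift and lb; in the real structure these
 * are isl values and affine expressions that may depend on the schedule
 * dimensions D.
 */
static inline long gpu_tile_index_sketch(long a, long shift, long stride,
	long lb)
{
	/* By construction, a + shift is always a multiple of stride. */
	return (a + shift) / stride - lb;
}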

struct gpu_array_tile *gpu_array_tile_create(isl_ctx *ctx, int n_index);
struct gpu_array_tile *gpu_array_tile_free(struct gpu_array_tile *tile);

__isl_give isl_val *gpu_array_tile_size(struct gpu_array_tile *tile);
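
/* A hedged usage sketch (hypothetical function, not part of ppcg):
 * create a tile for a two-dimensional array, query its size once the
 * per-index bounds have been filled in, and free everything.  How the
 * bounds get filled in is elided; that happens elsewhere in ppcg, not
 * through this interface.
 */
static inline void gpu_array_tile_usage_sketch(isl_ctx *ctx)
{
	struct gpu_array_tile *tile;
	isl_val *size;

	tile = gpu_array_tile_create(ctx, 2);
	if (!tile)
		return;

	/* ... fill in tile->bound[0] and tile->bound[1] ... */

	/* The returned isl_val is owned by the caller (__isl_give). */
	size = gpu_array_tile_size(tile);
	isl_val_free(size);

	gpu_array_tile_free(tile);
}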

#endif