//===------ PPCGCodeGeneration.cpp - Polly Accelerator Code Generation. ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Take a scop created by ScopInfo and map it to GPU code using the ppcg
// GPU mapping strategy.
//
//===----------------------------------------------------------------------===//

#include "polly/CodeGen/PPCGCodeGeneration.h"
#include "polly/CodeGen/CodeGeneration.h"
#include "polly/CodeGen/IslAst.h"
#include "polly/CodeGen/IslNodeBuilder.h"
#include "polly/CodeGen/PerfMonitor.h"
#include "polly/CodeGen/Utils.h"
#include "polly/DependenceInfo.h"
#include "polly/LinkAllPasses.h"
#include "polly/Options.h"
#include "polly/ScopDetection.h"
#include "polly/ScopInfo.h"
#include "polly/Support/SCEVValidator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IntrinsicsNVPTX.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Verifier.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/InitializePasses.h"
#include "llvm/Linker/Linker.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "isl/union_map.h"

#include "ppcg/cuda.h"
#include "ppcg/ppcg.h"

#include "llvm/Support/Debug.h"
using namespace polly;
using namespace llvm;

#define DEBUG_TYPE "polly-codegen-ppcg"

static cl::opt<bool> DumpSchedule("polly-acc-dump-schedule",
                                  cl::desc("Dump the computed GPU Schedule"),
                                  cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));
static cl::opt<bool>
    DumpCode("polly-acc-dump-code",
             cl::desc("Dump C code describing the GPU mapping"), cl::Hidden,
             cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));
static cl::opt<bool> DumpKernelIR("polly-acc-dump-kernel-ir",
                                  cl::desc("Dump the kernel LLVM-IR"),
                                  cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));

static cl::opt<bool> DumpKernelASM("polly-acc-dump-kernel-asm",
                                   cl::desc("Dump the kernel assembly code"),
                                   cl::Hidden, cl::init(false), cl::ZeroOrMore,
                                   cl::cat(PollyCategory));

static cl::opt<bool> FastMath("polly-acc-fastmath",
                              cl::desc("Allow unsafe math optimizations"),
                              cl::Hidden, cl::init(false), cl::ZeroOrMore,
                              cl::cat(PollyCategory));
static cl::opt<bool> SharedMemory("polly-acc-use-shared",
                                  cl::desc("Use shared memory"), cl::Hidden,
                                  cl::init(false), cl::ZeroOrMore,
                                  cl::cat(PollyCategory));
static cl::opt<bool> PrivateMemory("polly-acc-use-private",
                                   cl::desc("Use private memory"), cl::Hidden,
                                   cl::init(false), cl::ZeroOrMore,
                                   cl::cat(PollyCategory));
bool polly::PollyManagedMemory;
static cl::opt<bool, true>
    XManagedMemory("polly-acc-codegen-managed-memory",
                   cl::desc("Generate Host kernel code assuming"
                            " that all memory has been"
                            " declared as managed memory"),
                   cl::location(PollyManagedMemory), cl::Hidden,
                   cl::init(false), cl::ZeroOrMore, cl::cat(PollyCategory));
static cl::opt<bool>
    FailOnVerifyModuleFailure("polly-acc-fail-on-verify-module-failure",
                              cl::desc("Fail and generate a backtrace if"
                                       " verifyModule fails on the GPU "
                                       " kernel module."),
                              cl::Hidden, cl::init(false), cl::ZeroOrMore,
                              cl::cat(PollyCategory));
static cl::opt<std::string> CUDALibDevice(
    "polly-acc-libdevice", cl::desc("Path to CUDA libdevice"), cl::Hidden,
    cl::init("/usr/local/cuda/nvvm/libdevice/libdevice.compute_20.10.ll"),
    cl::ZeroOrMore, cl::cat(PollyCategory));

static cl::opt<std::string>
    CudaVersion("polly-acc-cuda-version",
                cl::desc("The CUDA version to compile for"), cl::Hidden,
                cl::init("sm_30"), cl::ZeroOrMore, cl::cat(PollyCategory));
static cl::opt<int>
    MinCompute("polly-acc-mincompute",
               cl::desc("Minimal number of compute statements to run on GPU."),
               cl::Hidden, cl::init(10 * 512 * 512));
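
// Illustrative usage of the flags above (not an exhaustive or authoritative
// invocation): passing -polly-acc-dump-kernel-ir to a Polly-enabled
// compilation dumps the generated kernel LLVM-IR to stderr; the other
// -polly-acc-dump-* options behave analogously for the computed GPU schedule,
// the C code of the mapping, and the kernel assembly.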
extern bool polly::PerfMonitoring;
/// Return a unique name for a Scop, which is the scop region with the
/// function name.
std::string getUniqueScopName(const Scop *S) {
  return "Scop Region: " + S->getNameStr() +
         " | Function: " + std::string(S->getFunction().getName());
}
/// Used to store information PPCG wants for kills. This information is
/// used by live range reordering.
///
/// @see computeLiveRangeReordering
/// @see GPUNodeBuilder::createPPCGScop
/// @see GPUNodeBuilder::createPPCGProg
struct MustKillsInfo {
  /// Collection of all kill statements that will be sequenced at the end of
  /// PPCGScop->schedule.
  ///
  /// The nodes in `KillsSchedule` will be merged using `isl_schedule_set`
  /// which merges schedules in *arbitrary* order.
  /// (we don't care about the order of the kills anyway).
  isl::schedule KillsSchedule;
  /// Map from kill statement instances to scalars that need to be
  /// killed.
  ///
  /// We currently derive kill information for:
  ///  1. phi nodes. PHI nodes are not alive outside the scop and can
  ///     consequently all be killed.
  ///  2. Scalar arrays that are not used outside the Scop. This is
  ///     checked by `isScalarUsesContainedInScop`.
  ///  [params] -> { [Stmt_phantom[] -> ref_phantom[]] -> scalar_to_kill[] }
  isl::union_map TaggedMustKills;

  /// Tagged must kills stripped of the tags.
  /// [params] -> { Stmt_phantom[] -> scalar_to_kill[] }
  isl::union_map MustKills;

  MustKillsInfo() : KillsSchedule(nullptr) {}
};
/// Check if SAI's uses are entirely contained within Scop S.
/// If a scalar is used only within a Scop, we are free to kill it, as no data
/// can flow in/out of the value any more.
/// @see computeMustKillsInfo
static bool isScalarUsesContainedInScop(const Scop &S,
                                        const ScopArrayInfo *SAI) {
  assert(SAI->isValueKind() && "this function only deals with scalars."
                               " Dealing with arrays requires alias analysis");

  const Region &R = S.getRegion();
  for (User *U : SAI->getBasePtr()->users()) {
    Instruction *I = dyn_cast<Instruction>(U);
    assert(I && "invalid user of scop array info");
    if (!R.contains(I))
      return false;
  }
  return true;
}
/// Compute must-kills needed to enable live range reordering with PPCG.
///
/// @param S The Scop to compute live range reordering information
/// @returns live range reordering information that can be used to setup
///          PPCG.
static MustKillsInfo computeMustKillsInfo(const Scop &S) {
  const isl::space ParamSpace = S.getParamSpace();
  MustKillsInfo Info;

  // 1. Collect all ScopArrayInfo that satisfy *any* of the criteria:
  //      1.1 phi nodes in scop.
  //      1.2 scalars that are only used within the scop
  SmallVector<isl::id, 4> KillMemIds;
  for (ScopArrayInfo *SAI : S.arrays()) {
    if (SAI->isPHIKind() ||
        (SAI->isValueKind() && isScalarUsesContainedInScop(S, SAI)))
      KillMemIds.push_back(isl::manage(SAI->getBasePtrId().release()));
  }

  Info.TaggedMustKills = isl::union_map::empty(ParamSpace);
  Info.MustKills = isl::union_map::empty(ParamSpace);

  // Initialising KillsSchedule to `isl_set_empty` creates an empty node in the
  // schedule tree:
  //     - filter: "[control] -> { }"
  // So, we choose to not create this to keep the output a little nicer,
  // at the cost of some code complexity.
  Info.KillsSchedule = nullptr;
  for (isl::id &ToKillId : KillMemIds) {
    isl::id KillStmtId = isl::id::alloc(
        S.getIslCtx(),
        std::string("SKill_phantom_").append(ToKillId.get_name()), nullptr);

    // NOTE: construction of tagged_must_kill:
    // 2. We need to construct a map:
    //      [param] -> { [Stmt_phantom[] -> ref_phantom[]] -> scalar_to_kill[] }
    //    To construct this, we use `isl_map_domain_product` on 2 maps:
    // 2a. StmtToScalar:
    //      [param] -> { Stmt_phantom[] -> scalar_to_kill[] }
    // 2b. PhantomRefToScalar:
    //      [param] -> { ref_phantom[] -> scalar_to_kill[] }
    //
    // Combining these with `isl_map_domain_product` gives us TaggedMustKill:
    //      [param] -> { [Stmt[] -> phantom_ref[]] -> scalar_to_kill[] }

    // 2a. [param] -> { Stmt[] -> scalar_to_kill[] }
    isl::map StmtToScalar = isl::map::universe(ParamSpace);
    StmtToScalar = StmtToScalar.set_tuple_id(isl::dim::in, isl::id(KillStmtId));
    StmtToScalar = StmtToScalar.set_tuple_id(isl::dim::out, isl::id(ToKillId));

    isl::id PhantomRefId = isl::id::alloc(
        S.getIslCtx(), std::string("ref_phantom") + ToKillId.get_name(),
        nullptr);

    // 2b. [param] -> { phantom_ref[] -> scalar_to_kill[] }
    isl::map PhantomRefToScalar = isl::map::universe(ParamSpace);
    PhantomRefToScalar =
        PhantomRefToScalar.set_tuple_id(isl::dim::in, PhantomRefId);
    PhantomRefToScalar =
        PhantomRefToScalar.set_tuple_id(isl::dim::out, ToKillId);

    // 2. [param] -> { [Stmt[] -> phantom_ref[]] -> scalar_to_kill[] }
    isl::map TaggedMustKill = StmtToScalar.domain_product(PhantomRefToScalar);
    Info.TaggedMustKills = Info.TaggedMustKills.unite(TaggedMustKill);

    // 2. [param] -> { Stmt[] -> scalar_to_kill[] }
    Info.MustKills = Info.TaggedMustKills.domain_factor_domain();

    // 3. Create the kill schedule of the form:
    //     "[param] -> { Stmt_phantom[] }"
    // Then add this to Info.KillsSchedule.
    isl::space KillStmtSpace = ParamSpace;
    KillStmtSpace = KillStmtSpace.set_tuple_id(isl::dim::set, KillStmtId);
    isl::union_set KillStmtDomain = isl::set::universe(KillStmtSpace);

    isl::schedule KillSchedule = isl::schedule::from_domain(KillStmtDomain);
    if (Info.KillsSchedule)
      Info.KillsSchedule = isl::manage(
          isl_schedule_set(Info.KillsSchedule.release(), KillSchedule.copy()));
    else
      Info.KillsSchedule = KillSchedule;
  }

  return Info;
}
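
// As an illustration (hypothetical names): for a phi scalar backed by an
// array MemRef_phi, the loop above adds to TaggedMustKills a map roughly of
// the form
//   [n] -> { [SKill_phantom_MemRef_phi[] -> ref_phantomMemRef_phi[]]
//              -> MemRef_phi[] }
// and MustKills then contains the tag-stripped
//   [n] -> { SKill_phantom_MemRef_phi[] -> MemRef_phi[] }.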
/// Create the ast expressions for a ScopStmt.
///
/// This function is a callback used to generate the ast expressions for each
/// of the scheduled ScopStmts.
static __isl_give isl_id_to_ast_expr *pollyBuildAstExprForStmt(
    void *StmtT, __isl_take isl_ast_build *Build_C,
    isl_multi_pw_aff *(*FunctionIndex)(__isl_take isl_multi_pw_aff *MPA,
                                       isl_id *Id, void *User),
    void *UserIndex,
    isl_ast_expr *(*FunctionExpr)(isl_ast_expr *Expr, isl_id *Id, void *User),
    void *UserExpr) {

  ScopStmt *Stmt = (ScopStmt *)StmtT;

  if (!Stmt || !Build_C)
    return NULL;

  isl::ast_build Build = isl::manage_copy(Build_C);
  isl::ctx Ctx = Build.get_ctx();
  isl::id_to_ast_expr RefToExpr = isl::id_to_ast_expr::alloc(Ctx, 0);

  Stmt->setAstBuild(Build);

  for (MemoryAccess *Acc : *Stmt) {
    isl::map AddrFunc = Acc->getAddressFunction();
    AddrFunc = AddrFunc.intersect_domain(Stmt->getDomain());

    isl::id RefId = Acc->getId();
    isl::pw_multi_aff PMA = isl::pw_multi_aff::from_map(AddrFunc);

    isl::multi_pw_aff MPA = isl::multi_pw_aff(PMA);
    MPA = MPA.coalesce();
    MPA = isl::manage(FunctionIndex(MPA.release(), RefId.get(), UserIndex));

    isl::ast_expr Access = Build.access_from(MPA);
    Access = isl::manage(FunctionExpr(Access.release(), RefId.get(), UserExpr));
    RefToExpr = RefToExpr.set(RefId, Access);
  }

  return RefToExpr.release();
}
/// Given a LLVM Type, compute its size in bytes.
static int computeSizeInBytes(const Type *T) {
  int bytes = T->getPrimitiveSizeInBits() / 8;
  if (bytes == 0)
    bytes = T->getScalarSizeInBits() / 8;
  return bytes;
}
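
// For example, computeSizeInBytes yields 8 for a double and 4 for an i32;
// the fallback to getScalarSizeInBits covers types whose primitive size is
// reported as 0.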
/// Generate code for a GPU specific isl AST.
///
/// The GPUNodeBuilder augments the general existing IslNodeBuilder, which
/// generates code for general-purpose AST nodes, with special functionality
/// for generating GPU specific user nodes.
///
/// @see GPUNodeBuilder::createUser
class GPUNodeBuilder : public IslNodeBuilder {
public:
  GPUNodeBuilder(PollyIRBuilder &Builder, ScopAnnotator &Annotator,
                 const DataLayout &DL, LoopInfo &LI, ScalarEvolution &SE,
                 DominatorTree &DT, Scop &S, BasicBlock *StartBlock,
                 gpu_prog *Prog, GPURuntime Runtime, GPUArch Arch)
      : IslNodeBuilder(Builder, Annotator, DL, LI, SE, DT, S, StartBlock),
        Prog(Prog), Runtime(Runtime), Arch(Arch) {
    getExprBuilder().setIDToSAI(&IDToSAI);
  }
  /// Create after-run-time-check initialization code.
  void initializeAfterRTH();

  /// Finalize the generated scop.
  void finalize() override;

  /// Track if the full build process was successful.
  ///
  /// This value is set to false, if throughout the build process an error
  /// occurred which prevents us from generating valid GPU code.
  bool BuildSuccessful = true;

  /// The maximal number of loops surrounding a sequential kernel.
  unsigned DeepestSequential = 0;

  /// The maximal number of loops surrounding a parallel kernel.
  unsigned DeepestParallel = 0;

  /// Return the name to set for the ptx_kernel.
  std::string getKernelFuncName(int Kernel_id);

private:
  /// A vector of array base pointers for which a new ScopArrayInfo was created.
  ///
  /// This vector is used to delete the ScopArrayInfo when it is not needed any
  /// more.
  std::vector<Value *> LocalArrays;

  /// A map from ScopArrays to their corresponding device allocations.
  std::map<ScopArrayInfo *, Value *> DeviceAllocations;

  /// The current GPU context.
  Value *GPUContext;

  /// The set of isl_ids allocated in the kernel
  std::vector<isl_id *> KernelIds;

  /// A module containing GPU code.
  ///
  /// This pointer is only set in case we are currently generating GPU code.
  std::unique_ptr<Module> GPUModule;

  /// The GPU program we generate code for.
  gpu_prog *Prog;

  /// The GPU Runtime implementation to use (OpenCL or CUDA).
  GPURuntime Runtime;

  /// The GPU Architecture to target.
  GPUArch Arch;

  /// Class to free isl_ids.
  struct IslIdDeleter {
    void operator()(__isl_take isl_id *Id) { isl_id_free(Id); };
  };

  /// A set containing all isl_ids allocated in a GPU kernel.
  ///
  /// By releasing this set all isl_ids will be freed.
  std::set<std::unique_ptr<isl_id, IslIdDeleter>> KernelIDs;

  IslExprBuilder::IDToScopArrayInfoTy IDToSAI;
  /// Create code for user-defined AST nodes.
  ///
  /// These AST nodes can be of type:
  ///
  ///   - ScopStmt:      A computational statement (TODO)
  ///   - Kernel:        A GPU kernel call (TODO)
  ///   - Data-Transfer: A GPU <-> CPU data-transfer
  ///   - In-kernel synchronization
  ///   - In-kernel memory copy statement
  ///
  /// @param UserStmt The ast node to generate code for.
  void createUser(__isl_take isl_ast_node *UserStmt) override;

  void createFor(__isl_take isl_ast_node *Node) override;

  enum DataDirection { HOST_TO_DEVICE, DEVICE_TO_HOST };

  /// Create code for a data transfer statement.
  ///
  /// @param TransferStmt The data transfer statement.
  /// @param Direction    The direction in which to transfer data.
  void createDataTransfer(__isl_take isl_ast_node *TransferStmt,
                          enum DataDirection Direction);
  /// Find llvm::Values referenced in GPU kernel.
  ///
  /// @param Kernel The kernel to scan for llvm::Values
  ///
  /// @returns A tuple, whose:
  ///          - First element contains the set of values referenced by the
  ///            kernel
  ///          - Second element contains the set of functions referenced by the
  ///            kernel. All functions in the set satisfy
  ///            `isValidFunctionInKernel`.
  ///          - Third element contains loops that have induction variables
  ///            which are used in the kernel, *and* these loops are *neither*
  ///            in the scop, nor do they immediately surround the Scop.
  ///            See [Code generation of induction variables of loops outside
  ///            Scops]
  std::tuple<SetVector<Value *>, SetVector<Function *>,
             SetVector<const Loop *>, isl::space>
  getReferencesInKernel(ppcg_kernel *Kernel);
  /// Compute the sizes of the execution grid for a given kernel.
  ///
  /// @param Kernel The kernel to compute grid sizes for.
  ///
  /// @returns A tuple with grid sizes for X and Y dimension
  std::tuple<Value *, Value *> getGridSizes(ppcg_kernel *Kernel);

  /// Get the managed array pointer for sending host pointers to the device.
  ///
  /// This is to be used only with managed memory
  Value *getManagedDeviceArray(gpu_array_info *Array, ScopArrayInfo *ArrayInfo);

  /// Compute the sizes of the thread blocks for a given kernel.
  ///
  /// @param Kernel The kernel to compute thread block sizes for.
  ///
  /// @returns A tuple with thread block sizes for X, Y, and Z dimensions.
  std::tuple<Value *, Value *, Value *> getBlockSizes(ppcg_kernel *Kernel);
  /// Store a specific kernel launch parameter in the array of kernel launch
  /// parameters.
  ///
  /// @param Parameters The list of parameters in which to store.
  /// @param Param      The kernel launch parameter to store.
  /// @param Index      The index in the parameter list, at which to store the
  ///                   parameter.
  void insertStoreParameter(Instruction *Parameters, Instruction *Param,
                            int Index);

  /// Create kernel launch parameters.
  ///
  /// @param Kernel        The kernel to create parameters for.
  /// @param F             The kernel function that has been created.
  /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
  ///
  /// @returns A stack allocated array with pointers to the parameter
  ///          values that are passed to the kernel.
  Value *createLaunchParameters(ppcg_kernel *Kernel, Function *F,
                                SetVector<Value *> SubtreeValues);

  /// Create declarations for kernel variable.
  ///
  /// This includes shared memory declarations.
  ///
  /// @param Kernel The kernel definition to create variables for.
  /// @param FN     The function into which to generate the variables.
  void createKernelVariables(ppcg_kernel *Kernel, Function *FN);
  /// Add CUDA annotations to module.
  ///
  /// Add a set of CUDA annotations that declares the maximal block dimensions
  /// that will be used to execute the CUDA kernel. This allows the NVIDIA
  /// PTX compiler to bound the number of allocated registers to ensure the
  /// resulting kernel is known to run with up to as many block dimensions
  /// as specified here.
  ///
  /// @param M         The module to add the annotations to.
  /// @param BlockDimX The size of block dimension X.
  /// @param BlockDimY The size of block dimension Y.
  /// @param BlockDimZ The size of block dimension Z.
  void addCUDAAnnotations(Module *M, Value *BlockDimX, Value *BlockDimY,
                          Value *BlockDimZ);

  /// Create GPU kernel.
  ///
  /// Code generate the kernel described by @p KernelStmt.
  ///
  /// @param KernelStmt The ast node to generate kernel code for.
  void createKernel(__isl_take isl_ast_node *KernelStmt);

  /// Generate code that computes the size of an array.
  ///
  /// @param Array The array for which to compute a size.
  Value *getArraySize(gpu_array_info *Array);
  /// Generate code to compute the minimal offset at which an array is accessed.
  ///
  /// The offset of an array is the minimal array location accessed in a scop.
  ///
  /// Example:
  ///
  ///   for (long i = 0; i < 100; i++)
  ///     A[i + 42] += ...
  ///
  ///   getArrayOffset(A) results in 42.
  ///
  /// @param Array The array for which to compute the offset.
  /// @returns An llvm::Value that contains the offset of the array.
  Value *getArrayOffset(gpu_array_info *Array);

  /// Prepare the kernel arguments for kernel code generation
  ///
  /// @param Kernel The kernel to generate code for.
  /// @param FN     The function created for the kernel.
  void prepareKernelArguments(ppcg_kernel *Kernel, Function *FN);
  /// Create kernel function.
  ///
  /// Create a kernel function located in a newly created module that can serve
  /// as target for device code generation. Set the Builder to point to the
  /// start block of this newly created function.
  ///
  /// @param Kernel           The kernel to generate code for.
  /// @param SubtreeValues    The set of llvm::Values referenced by this kernel.
  /// @param SubtreeFunctions The set of llvm::Functions referenced by this
  ///                         kernel.
  void createKernelFunction(ppcg_kernel *Kernel,
                            SetVector<Value *> &SubtreeValues,
                            SetVector<Function *> &SubtreeFunctions);

  /// Create the declaration of a kernel function.
  ///
  /// The kernel function takes as arguments:
  ///
  ///   - One i8 pointer for each external array reference used in the kernel.
  ///   - Other LLVM Value references (TODO)
  ///
  /// @param Kernel        The kernel to generate the function declaration for.
  /// @param SubtreeValues The set of llvm::Values referenced by this kernel.
  ///
  /// @returns The newly declared function.
  Function *createKernelFunctionDecl(ppcg_kernel *Kernel,
                                     SetVector<Value *> &SubtreeValues);

  /// Insert intrinsic functions to obtain thread and block ids.
  ///
  /// @param Kernel The kernel to generate the intrinsic functions for.
  void insertKernelIntrinsics(ppcg_kernel *Kernel);

  /// Insert function calls to retrieve the SPIR group/local ids.
  ///
  /// @param Kernel          The kernel to generate the function calls for.
  /// @param SizeTypeIs64Bit Whether size_t of the OpenCL device is 64bit.
  void insertKernelCallsSPIR(ppcg_kernel *Kernel, bool SizeTypeIs64bit);
  /// Setup the creation of functions referenced by the GPU kernel.
  ///
  /// 1. Create new function declarations in GPUModule which are the same as
  ///    SubtreeFunctions.
  ///
  /// 2. Populate IslNodeBuilder::ValueMap with mappings from
  ///    old functions (that come from the original module) to new functions
  ///    (that are created within GPUModule). That way, we generate references
  ///    to the correct function (in GPUModule) in BlockGenerator.
  ///
  /// @see IslNodeBuilder::ValueMap
  /// @see BlockGenerator::GlobalMap
  /// @see BlockGenerator::getNewValue
  /// @see GPUNodeBuilder::getReferencesInKernel.
  ///
  /// @param SubtreeFunctions The set of llvm::Functions referenced by
  ///                         this kernel.
  void setupKernelSubtreeFunctions(SetVector<Function *> SubtreeFunctions);

  /// Create a global-to-shared or shared-to-global copy statement.
  ///
  /// @param CopyStmt The copy statement to generate code for.
  void createKernelCopy(ppcg_kernel_stmt *CopyStmt);

  /// Create code for a ScopStmt called in @p Expr.
  ///
  /// @param Expr       The expression containing the call.
  /// @param KernelStmt The kernel statement referenced in the call.
  void createScopStmt(isl_ast_expr *Expr, ppcg_kernel_stmt *KernelStmt);

  /// Create an in-kernel synchronization call.
  void createKernelSync();

  /// Create a PTX assembly string for the current GPU kernel.
  ///
  /// @returns A string containing the corresponding PTX assembly code.
  std::string createKernelASM();
  /// Remove references from the dominator tree to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearDominators(Function *F);

  /// Remove references from scalar evolution to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearScalarEvolution(Function *F);

  /// Remove references from loop info to the kernel function @p F.
  ///
  /// @param F The function to remove references to.
  void clearLoops(Function *F);

  /// Check if the scop requires to be linked with CUDA's libdevice.
  bool requiresCUDALibDevice();

  /// Link with the NVIDIA libdevice library (if needed and available).
  void addCUDALibDevice();

  /// Finalize the generation of the kernel function.
  ///
  /// Free the LLVM-IR module corresponding to the kernel and -- if requested --
  /// dump its IR to stderr.
  ///
  /// @returns The Assembly string of the kernel.
  std::string finalizeKernelFunction();

  /// Finalize the generation of the kernel arguments.
  ///
  /// This function ensures that not-read-only scalars used in a kernel are
  /// stored back to the global memory location they are backed with before
  /// the kernel terminates.
  ///
  /// @param Kernel The kernel to finalize kernel arguments for.
  void finalizeKernelArguments(ppcg_kernel *Kernel);
  /// Create code that allocates memory to store arrays on device.
  void allocateDeviceArrays();

  /// Create code to prepare the managed device pointers.
  void prepareManagedDeviceArrays();

  /// Free all allocated device arrays.
  void freeDeviceArrays();

  /// Create a call to initialize the GPU context.
  ///
  /// @returns A pointer to the newly initialized context.
  Value *createCallInitContext();

  /// Create a call to get the device pointer for a kernel allocation.
  ///
  /// @param Allocation The Polly GPU allocation
  ///
  /// @returns The device parameter corresponding to this allocation.
  Value *createCallGetDevicePtr(Value *Allocation);

  /// Create a call to free the GPU context.
  ///
  /// @param Context A pointer to an initialized GPU context.
  void createCallFreeContext(Value *Context);

  /// Create a call to allocate memory on the device.
  ///
  /// @param Size The size of memory to allocate
  ///
  /// @returns A pointer that identifies this allocation.
  Value *createCallAllocateMemoryForDevice(Value *Size);

  /// Create a call to free a device array.
  ///
  /// @param Array The device array to free.
  void createCallFreeDeviceMemory(Value *Array);
  /// Create a call to copy data from host to device.
  ///
  /// @param HostPtr   A pointer to the host data that should be copied.
  /// @param DevicePtr A device pointer specifying the location to copy to.
  void createCallCopyFromHostToDevice(Value *HostPtr, Value *DevicePtr,
                                      Value *Size);

  /// Create a call to copy data from device to host.
  ///
  /// @param DevicePtr A pointer to the device data that should be copied.
  /// @param HostPtr   A host pointer specifying the location to copy to.
  void createCallCopyFromDeviceToHost(Value *DevicePtr, Value *HostPtr,
                                      Value *Size);

  /// Create a call to synchronize Host & Device.
  ///
  /// This is to be used only with managed memory.
  void createCallSynchronizeDevice();

  /// Create a call to get a kernel from an assembly string.
  ///
  /// @param Buffer The string describing the kernel.
  /// @param Entry  The name of the kernel function to call.
  ///
  /// @returns A pointer to a kernel object
  Value *createCallGetKernel(Value *Buffer, Value *Entry);

  /// Create a call to free a GPU kernel.
  ///
  /// @param GPUKernel The kernel to free.
  void createCallFreeKernel(Value *GPUKernel);

  /// Create a call to launch a GPU kernel.
  ///
  /// @param GPUKernel  The kernel to launch.
  /// @param GridDimX   The size of the first grid dimension.
  /// @param GridDimY   The size of the second grid dimension.
  /// @param GridBlockX The size of the first block dimension.
  /// @param GridBlockY The size of the second block dimension.
  /// @param GridBlockZ The size of the third block dimension.
  /// @param Parameters A pointer to an array that contains itself pointers to
  ///                   the parameter values passed for each kernel argument.
  void createCallLaunchKernel(Value *GPUKernel, Value *GridDimX,
                              Value *GridDimY, Value *BlockDimX,
                              Value *BlockDimY, Value *BlockDimZ,
                              Value *Parameters);
};
std::string GPUNodeBuilder::getKernelFuncName(int Kernel_id) {
  return "FUNC_" + S.getFunction().getName().str() + "_SCOP_" +
         std::to_string(S.getID()) + "_KERNEL_" + std::to_string(Kernel_id);
}
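
// For example (illustrative values), kernel 0 of the SCoP with id 2 in a
// function `gemm` is named "FUNC_gemm_SCOP_2_KERNEL_0".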
void GPUNodeBuilder::initializeAfterRTH() {
  BasicBlock *NewBB = SplitBlock(Builder.GetInsertBlock(),
                                 &*Builder.GetInsertPoint(), &DT, &LI);
  NewBB->setName("polly.acc.initialize");
  Builder.SetInsertPoint(&NewBB->front());

  GPUContext = createCallInitContext();

  if (!PollyManagedMemory)
    allocateDeviceArrays();
  else
    prepareManagedDeviceArrays();
}
void GPUNodeBuilder::finalize() {
  if (!PollyManagedMemory)
    freeDeviceArrays();

  createCallFreeContext(GPUContext);
  IslNodeBuilder::finalize();
}
void GPUNodeBuilder::allocateDeviceArrays() {
  assert(!PollyManagedMemory &&
         "Managed memory will directly send host pointers "
         "to the kernel. There is no need for device arrays");
  isl_ast_build *Build = isl_ast_build_from_context(S.getContext().release());

  for (int i = 0; i < Prog->n_array; ++i) {
    gpu_array_info *Array = &Prog->array[i];
    auto *ScopArray = (ScopArrayInfo *)Array->user;
    std::string DevArrayName("p_dev_array_");
    DevArrayName.append(Array->name);

    Value *ArraySize = getArraySize(Array);
    Value *Offset = getArrayOffset(Array);
    if (Offset)
      ArraySize = Builder.CreateSub(
          ArraySize,
          Builder.CreateMul(Offset,
                            Builder.getInt64(ScopArray->getElemSizeInBytes())));
    const SCEV *SizeSCEV = SE.getSCEV(ArraySize);
    // It makes no sense to have an array of size 0. The CUDA API will
    // throw an error anyway if we invoke `cuMallocManaged` with size `0`. We
    // choose to be defensive and catch this at the compile phase. It is
    // most likely that we are doing something wrong with size computation.
    if (SizeSCEV->isZero()) {
      errs() << getUniqueScopName(&S)
             << " has computed array size 0: " << *ArraySize
             << " | for array: " << *(ScopArray->getBasePtr())
             << ". This is illegal, exiting.\n";
      report_fatal_error("array size was computed to be 0");
    }

    Value *DevArray = createCallAllocateMemoryForDevice(ArraySize);
    DevArray->setName(DevArrayName);
    DeviceAllocations[ScopArray] = DevArray;
  }

  isl_ast_build_free(Build);
}
void GPUNodeBuilder::prepareManagedDeviceArrays() {
  assert(PollyManagedMemory &&
         "Device arrays must only be prepared in managed-memory mode");
  for (int i = 0; i < Prog->n_array; ++i) {
    gpu_array_info *Array = &Prog->array[i];
    ScopArrayInfo *ScopArray = (ScopArrayInfo *)Array->user;
    Value *HostPtr;

    if (gpu_array_is_scalar(Array))
      HostPtr = BlockGen.getOrCreateAlloca(ScopArray);
    else
      HostPtr = ScopArray->getBasePtr();
    HostPtr = getLatestValue(HostPtr);

    Value *Offset = getArrayOffset(Array);
    if (Offset) {
      HostPtr = Builder.CreatePointerCast(
          HostPtr, ScopArray->getElementType()->getPointerTo());
      HostPtr = Builder.CreateGEP(HostPtr, Offset);
    }

    HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy());
    DeviceAllocations[ScopArray] = HostPtr;
  }
}
void GPUNodeBuilder::addCUDAAnnotations(Module *M, Value *BlockDimX,
                                        Value *BlockDimY, Value *BlockDimZ) {
  auto AnnotationNode = M->getOrInsertNamedMetadata("nvvm.annotations");

  for (Function &F : *M) {
    if (F.getCallingConv() != CallingConv::PTX_Kernel)
      continue;

    Value *V[] = {BlockDimX, BlockDimY, BlockDimZ};

    Metadata *Elements[] = {
        ValueAsMetadata::get(&F),   MDString::get(M->getContext(), "maxntidx"),
        ValueAsMetadata::get(V[0]), MDString::get(M->getContext(), "maxntidy"),
        ValueAsMetadata::get(V[1]), MDString::get(M->getContext(), "maxntidz"),
        ValueAsMetadata::get(V[2]),
    };

    MDNode *Node = MDNode::get(M->getContext(), Elements);
    AnnotationNode->addOperand(Node);
  }
}
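
// As a sketch (exact IR depends on the kernel and the block sizes chosen),
// the loop above emits one !nvvm.annotations entry per kernel, roughly:
//   !{ptr @FUNC_..._KERNEL_0, !"maxntidx", i32 32,
//     !"maxntidy", i32 4, !"maxntidz", i32 1}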
void GPUNodeBuilder::freeDeviceArrays() {
  assert(!PollyManagedMemory && "Managed memory does not use device arrays");
  for (auto &Array : DeviceAllocations)
    createCallFreeDeviceMemory(Array.second);
}
Value *GPUNodeBuilder::createCallGetKernel(Value *Buffer, Value *Entry) {
  const char *Name = "polly_getKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Buffer, Entry});
}
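
// The polly_* entry points declared on demand by the createCall* helpers in
// this file are not defined here; they are assumed to be provided by Polly's
// GPU runtime support library and resolved when the final program is linked.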
Value *GPUNodeBuilder::createCallGetDevicePtr(Value *Allocation) {
  const char *Name = "polly_getDevicePtr";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Allocation});
}
void GPUNodeBuilder::createCallLaunchKernel(Value *GPUKernel, Value *GridDimX,
                                            Value *GridDimY, Value *BlockDimX,
                                            Value *BlockDimY, Value *BlockDimZ,
                                            Value *Parameters) {
  const char *Name = "polly_launchKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt32Ty());
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {GPUKernel, GridDimX, GridDimY, BlockDimX, BlockDimY,
                         BlockDimZ, Parameters});
}
void GPUNodeBuilder::createCallFreeKernel(Value *GPUKernel) {
  const char *Name = "polly_freeKernel";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {GPUKernel});
}
void GPUNodeBuilder::createCallFreeDeviceMemory(Value *Array) {
  assert(!PollyManagedMemory &&
         "Managed memory does not allocate or free memory "
         "for the device");
  const char *Name = "polly_freeDeviceMemory";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {Array});
}
Value *GPUNodeBuilder::createCallAllocateMemoryForDevice(Value *Size) {
  assert(!PollyManagedMemory &&
         "Managed memory does not allocate or free memory "
         "for the device");
  const char *Name = "polly_allocateMemoryForDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {Size});
}
void GPUNodeBuilder::createCallCopyFromHostToDevice(Value *HostData,
                                                    Value *DeviceData,
                                                    Value *Size) {
  assert(!PollyManagedMemory &&
         "Managed memory does not transfer memory between "
         "device and host");
  const char *Name = "polly_copyFromHostToDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {HostData, DeviceData, Size});
}
void GPUNodeBuilder::createCallCopyFromDeviceToHost(Value *DeviceData,
                                                    Value *HostData,
                                                    Value *Size) {
  assert(!PollyManagedMemory &&
         "Managed memory does not transfer memory between "
         "device and host");
  const char *Name = "polly_copyFromDeviceToHost";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt8PtrTy());
    Args.push_back(Builder.getInt64Ty());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {DeviceData, HostData, Size});
}
void GPUNodeBuilder::createCallSynchronizeDevice() {
  assert(PollyManagedMemory && "explicit synchronization is only necessary for "
                               "managed memory");
  const char *Name = "polly_synchronizeDevice";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F);
}
Value *GPUNodeBuilder::createCallInitContext() {
  const char *Name;

  switch (Runtime) {
  case GPURuntime::CUDA:
    Name = "polly_initContextCUDA";
    break;
  case GPURuntime::OpenCL:
    Name = "polly_initContextCL";
    break;
  }

  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    FunctionType *Ty = FunctionType::get(Builder.getInt8PtrTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  return Builder.CreateCall(F, {});
}
void GPUNodeBuilder::createCallFreeContext(Value *Context) {
  const char *Name = "polly_freeContext";
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  Function *F = M->getFunction(Name);

  // If F is not available, declare it.
  if (!F) {
    GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
    std::vector<Type *> Args;
    Args.push_back(Builder.getInt8PtrTy());
    FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
    F = Function::Create(Ty, Linkage, Name, M);
  }

  Builder.CreateCall(F, {Context});
}
/// Check if one string is a prefix of another.
///
/// @param String The string in which to look for the prefix.
/// @param Prefix The prefix to look for.
static bool isPrefix(std::string String, std::string Prefix) {
  return String.find(Prefix) == 0;
}
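
// For example, isPrefix("to_device_MemRef_A", "to_device") is true, while
// isPrefix("kernel0", "to_device") is false (the names are illustrative).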
Value *GPUNodeBuilder::getArraySize(gpu_array_info *Array) {
  isl::ast_build Build = isl::ast_build::from_context(S.getContext());
  Value *ArraySize = ConstantInt::get(Builder.getInt64Ty(), Array->size);

  if (!gpu_array_is_scalar(Array)) {
    isl::multi_pw_aff ArrayBound = isl::manage_copy(Array->bound);

    isl::pw_aff OffsetDimZero = ArrayBound.get_pw_aff(0);
    isl::ast_expr Res = Build.expr_from(OffsetDimZero);

    for (unsigned int i = 1; i < Array->n_index; i++) {
      isl::pw_aff Bound_I = ArrayBound.get_pw_aff(i);
      isl::ast_expr Expr = Build.expr_from(Bound_I);
      Res = Res.mul(Expr);
    }

    Value *NumElements = ExprBuilder.create(Res.release());
    if (NumElements->getType() != ArraySize->getType())
      NumElements = Builder.CreateSExt(NumElements, ArraySize->getType());
    ArraySize = Builder.CreateMul(ArraySize, NumElements);
  }

  return ArraySize;
}
Value *GPUNodeBuilder::getArrayOffset(gpu_array_info *Array) {
  if (gpu_array_is_scalar(Array))
    return nullptr;

  isl::ast_build Build = isl::ast_build::from_context(S.getContext());

  isl::set Min = isl::manage_copy(Array->extent).lexmin();

  isl::set ZeroSet = isl::set::universe(Min.get_space());

  for (long i = 0, n = Min.dim(isl::dim::set); i < n; i++)
    ZeroSet = ZeroSet.fix_si(isl::dim::set, i, 0);

  if (Min.is_subset(ZeroSet)) {
    return nullptr;
  }

  isl::ast_expr Result = isl::ast_expr::from_val(isl::val(Min.get_ctx(), 0));

  for (long i = 0, n = Min.dim(isl::dim::set); i < n; i++) {
    if (i > 0) {
      isl::pw_aff Bound_I =
          isl::manage(isl_multi_pw_aff_get_pw_aff(Array->bound, i - 1));
      isl::ast_expr BExpr = Build.expr_from(Bound_I);
      Result = Result.mul(BExpr);
    }
    isl::pw_aff DimMin = Min.dim_min(i);
    isl::ast_expr MExpr = Build.expr_from(DimMin);
    Result = Result.add(MExpr);
  }

  return ExprBuilder.create(Result.release());
}
Value *GPUNodeBuilder::getManagedDeviceArray(gpu_array_info *Array,
                                             ScopArrayInfo *ArrayInfo) {
  assert(PollyManagedMemory && "Only used when you wish to get a host "
                               "pointer for sending data to the kernel, "
                               "with managed memory");
  std::map<ScopArrayInfo *, Value *>::iterator it;
  it = DeviceAllocations.find(ArrayInfo);
  assert(it != DeviceAllocations.end() &&
         "Device array expected to be available");
  return it->second;
}
void GPUNodeBuilder::createDataTransfer(__isl_take isl_ast_node *TransferStmt,
                                        enum DataDirection Direction) {
  assert(!PollyManagedMemory && "Managed memory needs no data transfers");
  isl_ast_expr *Expr = isl_ast_node_user_get_expr(TransferStmt);
  isl_ast_expr *Arg = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(Arg);
  auto Array = (gpu_array_info *)isl_id_get_user(Id);
  auto ScopArray = (ScopArrayInfo *)(Array->user);

  Value *Size = getArraySize(Array);
  Value *Offset = getArrayOffset(Array);
  Value *DevPtr = DeviceAllocations[ScopArray];

  Value *HostPtr;

  if (gpu_array_is_scalar(Array))
    HostPtr = BlockGen.getOrCreateAlloca(ScopArray);
  else
    HostPtr = ScopArray->getBasePtr();
  HostPtr = getLatestValue(HostPtr);

  if (Offset) {
    HostPtr = Builder.CreatePointerCast(
        HostPtr, ScopArray->getElementType()->getPointerTo());
    HostPtr = Builder.CreateGEP(HostPtr, Offset);
  }

  HostPtr = Builder.CreatePointerCast(HostPtr, Builder.getInt8PtrTy());

  if (Offset) {
    Size = Builder.CreateSub(
        Size, Builder.CreateMul(
                  Offset, Builder.getInt64(ScopArray->getElemSizeInBytes())));
  }

  if (Direction == HOST_TO_DEVICE)
    createCallCopyFromHostToDevice(HostPtr, DevPtr, Size);
  else
    createCallCopyFromDeviceToHost(DevPtr, HostPtr, Size);

  isl_id_free(Id);
  isl_ast_expr_free(Arg);
  isl_ast_expr_free(Expr);
  isl_ast_node_free(TransferStmt);
}
void GPUNodeBuilder::createUser(__isl_take isl_ast_node *UserStmt) {
  isl_ast_expr *Expr = isl_ast_node_user_get_expr(UserStmt);
  isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(StmtExpr);
  isl_ast_expr_free(StmtExpr);

  const char *Str = isl_id_get_name(Id);
  if (!strcmp(Str, "kernel")) {
    createKernel(UserStmt);
    if (PollyManagedMemory)
      createCallSynchronizeDevice();
    isl_ast_expr_free(Expr);
    return;
  }
  if (!strcmp(Str, "init_device")) {
    initializeAfterRTH();
    isl_ast_node_free(UserStmt);
    isl_ast_expr_free(Expr);
    return;
  }
  if (!strcmp(Str, "clear_device")) {
    finalize();
    isl_ast_node_free(UserStmt);
    isl_ast_expr_free(Expr);
    return;
  }
  if (isPrefix(Str, "to_device")) {
    if (!PollyManagedMemory)
      createDataTransfer(UserStmt, HOST_TO_DEVICE);
    else
      isl_ast_node_free(UserStmt);

    isl_ast_expr_free(Expr);
    return;
  }

  if (isPrefix(Str, "from_device")) {
    if (!PollyManagedMemory) {
      createDataTransfer(UserStmt, DEVICE_TO_HOST);
    } else {
      isl_ast_node_free(UserStmt);
    }
    isl_ast_expr_free(Expr);
    return;
  }

  isl_id *Anno = isl_ast_node_get_annotation(UserStmt);
  struct ppcg_kernel_stmt *KernelStmt =
      (struct ppcg_kernel_stmt *)isl_id_get_user(Anno);
  isl_id_free(Anno);

  switch (KernelStmt->type) {
  case ppcg_kernel_domain:
    createScopStmt(Expr, KernelStmt);
    isl_ast_node_free(UserStmt);
    return;
  case ppcg_kernel_copy:
    createKernelCopy(KernelStmt);
    isl_ast_expr_free(Expr);
    isl_ast_node_free(UserStmt);
    return;
  case ppcg_kernel_sync:
    createKernelSync();
    isl_ast_expr_free(Expr);
    isl_ast_node_free(UserStmt);
    return;
  }

  isl_ast_expr_free(Expr);
  isl_ast_node_free(UserStmt);
}
void GPUNodeBuilder::createFor(__isl_take isl_ast_node *Node) {
  createForSequential(isl::manage(Node), false);
}
void GPUNodeBuilder::createKernelCopy(ppcg_kernel_stmt *KernelStmt) {
  isl_ast_expr *LocalIndex = isl_ast_expr_copy(KernelStmt->u.c.local_index);
  LocalIndex = isl_ast_expr_address_of(LocalIndex);
  Value *LocalAddr = ExprBuilder.create(LocalIndex);
  isl_ast_expr *Index = isl_ast_expr_copy(KernelStmt->u.c.index);
  Index = isl_ast_expr_address_of(Index);
  Value *GlobalAddr = ExprBuilder.create(Index);

  if (KernelStmt->u.c.read) {
    LoadInst *Load = Builder.CreateLoad(GlobalAddr, "shared.read");
    Builder.CreateStore(Load, LocalAddr);
  } else {
    LoadInst *Load = Builder.CreateLoad(LocalAddr, "shared.write");
    Builder.CreateStore(Load, GlobalAddr);
  }
}
void GPUNodeBuilder::createScopStmt(isl_ast_expr *Expr,
                                    ppcg_kernel_stmt *KernelStmt) {
  auto Stmt = (ScopStmt *)KernelStmt->u.d.stmt->stmt;
  isl_id_to_ast_expr *Indexes = KernelStmt->u.d.ref2expr;

  LoopToScevMapT LTS;
  LTS.insert(OutsideLoopIterations.begin(), OutsideLoopIterations.end());

  createSubstitutions(Expr, Stmt, LTS);

  if (Stmt->isBlockStmt())
    BlockGen.copyStmt(*Stmt, LTS, Indexes);
  else
    RegionGen.copyStmt(*Stmt, LTS, Indexes);
}
void GPUNodeBuilder::createKernelSync() {
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  const char *SpirName = "__gen_ocl_barrier_global";

  Function *Sync;

  switch (Arch) {
  case GPUArch::SPIR64:
  case GPUArch::SPIR32:
    Sync = M->getFunction(SpirName);

    // If Sync is not available, declare it.
    if (!Sync) {
      GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
      std::vector<Type *> Args;
      FunctionType *Ty = FunctionType::get(Builder.getVoidTy(), Args, false);
      Sync = Function::Create(Ty, Linkage, SpirName, M);
      Sync->setCallingConv(CallingConv::SPIR_FUNC);
    }
    break;
  case GPUArch::NVPTX64:
    Sync = Intrinsic::getDeclaration(M, Intrinsic::nvvm_barrier0);
    break;
  }

  Builder.CreateCall(Sync, {});
}
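
// On the NVPTX64 path the call above uses the llvm.nvvm.barrier0 intrinsic,
// which the NVPTX backend emits as a block-wide "bar.sync 0" barrier in PTX;
// the SPIR paths call the named barrier function instead.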
/// Collect llvm::Values referenced from @p Node
///
/// This function only applies to isl_ast_nodes that are user_nodes referring
/// to a ScopStmt. All other node types are ignored.
///
/// @param Node The node to collect references for.
/// @param User A user pointer used as storage for the data that is collected.
///
/// @returns isl_bool_true if data could be collected successfully.
isl_bool collectReferencesInGPUStmt(__isl_keep isl_ast_node *Node, void *User) {
  if (isl_ast_node_get_type(Node) != isl_ast_node_user)
    return isl_bool_true;

  isl_ast_expr *Expr = isl_ast_node_user_get_expr(Node);
  isl_ast_expr *StmtExpr = isl_ast_expr_get_op_arg(Expr, 0);
  isl_id *Id = isl_ast_expr_get_id(StmtExpr);
  const char *Str = isl_id_get_name(Id);
  isl_ast_expr_free(StmtExpr);
  isl_ast_expr_free(Expr);

  if (!isPrefix(Str, "Stmt"))
    return isl_bool_true;

  Id = isl_ast_node_get_annotation(Node);
  auto *KernelStmt = (ppcg_kernel_stmt *)isl_id_get_user(Id);
  auto Stmt = (ScopStmt *)KernelStmt->u.d.stmt->stmt;

  addReferencesFromStmt(Stmt, User, false /* CreateScalarRefs */);

  return isl_bool_true;
}
/// A list of functions that are available in NVIDIA's libdevice.
const std::set<std::string> CUDALibDeviceFunctions = {
    "exp",      "expf",      "expl",      "cos", "cosf", "sqrt", "sqrtf",
    "copysign", "copysignf", "copysignl", "log", "logf", "powi", "powif"};

// A map from intrinsics to their corresponding libdevice functions.
const std::map<std::string, std::string> IntrinsicToLibdeviceFunc = {
    {"llvm.exp.f64", "exp"},
    {"llvm.exp.f32", "expf"},
    {"llvm.powi.f64", "powi"},
    {"llvm.powi.f32", "powif"}};
/// Return the corresponding CUDA libdevice function name for @p Name.
/// Note that this function will try to convert intrinsics in the list
/// IntrinsicToLibdeviceFunc into libdevice functions.
/// This is because some intrinsics such as `exp`
/// are not supported by the NVPTX backend.
/// If this restriction of the backend is lifted, we should refactor our code
/// so that we use intrinsics whenever possible.
///
/// Return "" if we are not compiling for CUDA.
std::string getCUDALibDeviceFuntion(StringRef NameRef) {
  std::string Name = NameRef.str();
  auto It = IntrinsicToLibdeviceFunc.find(Name);
  if (It != IntrinsicToLibdeviceFunc.end())
    return getCUDALibDeviceFuntion(It->second);

  if (CUDALibDeviceFunctions.count(Name))
    return ("__nv_" + Name);

  return "";
}
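
// For example, "llvm.exp.f64" is first mapped to "exp" via
// IntrinsicToLibdeviceFunc and the recursive call then returns "__nv_exp";
// a name that appears in neither table yields "".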
/// Check if F is a function that we can code-generate in a GPU kernel.
static bool isValidFunctionInKernel(llvm::Function *F, bool AllowLibDevice) {
  assert(F && "F is an invalid pointer");
  // We string compare against the name of the function to allow
  // all variants of the intrinsic "llvm.sqrt.*", "llvm.fabs", and
  // "llvm.copysign".
  const StringRef Name = F->getName();

  if (AllowLibDevice && getCUDALibDeviceFuntion(Name).length() > 0)
    return true;

  return F->isIntrinsic() &&
         (Name.startswith("llvm.sqrt") || Name.startswith("llvm.fabs") ||
          Name.startswith("llvm.copysign"));
}
/// Do not take `Function` as a subtree value.
///
/// We try to take the reference of all subtree values and pass them along
/// to the kernel from the host. Taking an address of any function and
/// trying to pass along is nonsensical. Only allow `Value`s that are not
/// `Function`s.
static bool isValidSubtreeValue(llvm::Value *V) { return !isa<Function>(V); }
/// Return `Function`s from `RawSubtreeValues`.
static SetVector<Function *>
getFunctionsFromRawSubtreeValues(SetVector<Value *> RawSubtreeValues,
                                 bool AllowCUDALibDevice) {
  SetVector<Function *> SubtreeFunctions;
  for (Value *It : RawSubtreeValues) {
    Function *F = dyn_cast<Function>(It);
    if (F) {
      assert(isValidFunctionInKernel(F, AllowCUDALibDevice) &&
             "Code should have bailed out by "
             "this point if an invalid function "
             "were present in a kernel.");
      SubtreeFunctions.insert(F);
    }
  }
  return SubtreeFunctions;
}
std::tuple<SetVector<Value *>, SetVector<Function *>, SetVector<const Loop *>,
           isl::space>
GPUNodeBuilder::getReferencesInKernel(ppcg_kernel *Kernel) {
  SetVector<Value *> SubtreeValues;
  SetVector<const SCEV *> SCEVs;
  SetVector<const Loop *> Loops;
  isl::space ParamSpace = isl::space(S.getIslCtx(), 0, 0).params();
  SubtreeReferences References = {
      LI, SE, S, ValueMap, SubtreeValues, SCEVs, getBlockGenerator(),
      &ParamSpace};

  for (const auto &I : IDToValue)
    SubtreeValues.insert(I.second);

  // NOTE: this is populated in IslNodeBuilder::addParameters
  // See [Code generation of induction variables of loops outside Scops].
  for (const auto &I : OutsideLoopIterations)
    SubtreeValues.insert(cast<SCEVUnknown>(I.second)->getValue());

  isl_ast_node_foreach_descendant_top_down(
      Kernel->tree, collectReferencesInGPUStmt, &References);

  for (const SCEV *Expr : SCEVs) {
    findValues(Expr, SE, SubtreeValues);
    findLoops(Expr, Loops);
  }

  Loops.remove_if([this](const Loop *L) {
    return S.contains(L) || L->contains(S.getEntry());
  });

  for (auto &SAI : S.arrays())
    SubtreeValues.remove(SAI->getBasePtr());

  isl_space *Space = S.getParamSpace().release();
  for (long i = 0, n = isl_space_dim(Space, isl_dim_param); i < n; i++) {
    isl_id *Id = isl_space_get_dim_id(Space, isl_dim_param, i);
    assert(IDToValue.count(Id));
    Value *Val = IDToValue[Id];
    SubtreeValues.remove(Val);
    isl_id_free(Id);
  }

  isl_space_free(Space);

  for (long i = 0, n = isl_space_dim(Kernel->space, isl_dim_set); i < n; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
    assert(IDToValue.count(Id));
    Value *Val = IDToValue[Id];
    SubtreeValues.remove(Val);
    isl_id_free(Id);
  }

  // Note: { ValidSubtreeValues, ValidSubtreeFunctions } partitions
  // SubtreeValues. This is important, because we should not lose any
  // SubtreeValues in the process of constructing the
  // "ValidSubtree{Values, Functions} sets. Nor should the set
  // ValidSubtree{Values, Functions} have any common element.
  auto ValidSubtreeValuesIt =
      make_filter_range(SubtreeValues, isValidSubtreeValue);
  SetVector<Value *> ValidSubtreeValues(ValidSubtreeValuesIt.begin(),
                                        ValidSubtreeValuesIt.end());

  bool AllowCUDALibDevice = Arch == GPUArch::NVPTX64;

  SetVector<Function *> ValidSubtreeFunctions(
      getFunctionsFromRawSubtreeValues(SubtreeValues, AllowCUDALibDevice));

  // @see IslNodeBuilder::getReferencesInSubtree
  SetVector<Value *> ReplacedValues;
  for (Value *V : ValidSubtreeValues) {
    auto It = ValueMap.find(V);
    if (It == ValueMap.end())
      ReplacedValues.insert(V);
    else
      ReplacedValues.insert(It->second);
  }
  return std::make_tuple(ReplacedValues, ValidSubtreeFunctions, Loops,
                         ParamSpace);
}
void GPUNodeBuilder::clearDominators(Function *F) {
  DomTreeNode *N = DT.getNode(&F->getEntryBlock());
  std::vector<BasicBlock *> Nodes;
  for (po_iterator<DomTreeNode *> I = po_begin(N), E = po_end(N); I != E; ++I)
    Nodes.push_back(I->getBlock());

  for (BasicBlock *BB : Nodes)
    DT.eraseNode(BB);
}
void GPUNodeBuilder::clearScalarEvolution(Function *F) {
  for (BasicBlock &BB : *F) {
    Loop *L = LI.getLoopFor(&BB);
    if (L)
      SE.forgetLoop(L);
  }
}
void GPUNodeBuilder::clearLoops(Function *F) {
  SmallSet<Loop *, 1> WorkList;
  for (BasicBlock &BB : *F) {
    Loop *L = LI.getLoopFor(&BB);
    if (L)
      WorkList.insert(L);
  }
  for (auto *L : WorkList)
    LI.erase(L);
}
std::tuple<Value *, Value *> GPUNodeBuilder::getGridSizes(ppcg_kernel *Kernel) {
  std::vector<Value *> Sizes;
  isl::ast_build Context = isl::ast_build::from_context(S.getContext());

  isl::multi_pw_aff GridSizePwAffs = isl::manage_copy(Kernel->grid_size);
  for (long i = 0; i < Kernel->n_grid; i++) {
    isl::pw_aff Size = GridSizePwAffs.get_pw_aff(i);
    isl::ast_expr GridSize = Context.expr_from(Size);
    Value *Res = ExprBuilder.create(GridSize.release());
    Res = Builder.CreateTrunc(Res, Builder.getInt32Ty());
    Sizes.push_back(Res);
  }

  for (long i = Kernel->n_grid; i < 3; i++)
    Sizes.push_back(ConstantInt::get(Builder.getInt32Ty(), 1));

  return std::make_tuple(Sizes[0], Sizes[1]);
}
std::tuple<Value *, Value *, Value *>
GPUNodeBuilder::getBlockSizes(ppcg_kernel *Kernel) {
  std::vector<Value *> Sizes;

  for (long i = 0; i < Kernel->n_block; i++) {
    Value *Res = ConstantInt::get(Builder.getInt32Ty(), Kernel->block_dim[i]);
    Sizes.push_back(Res);
  }

  for (long i = Kernel->n_block; i < 3; i++)
    Sizes.push_back(ConstantInt::get(Builder.getInt32Ty(), 1));

  return std::make_tuple(Sizes[0], Sizes[1], Sizes[2]);
}
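
// Both helpers pad missing dimensions with 1, so a kernel with n_block == 2
// and block_dim == {32, 4} yields the block sizes (32, 4, 1); grid sizes are
// padded the same way (the values here are illustrative).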
void GPUNodeBuilder::insertStoreParameter(Instruction *Parameters,
                                          Instruction *Param, int Index) {
  Value *Slot = Builder.CreateGEP(
      Parameters, {Builder.getInt64(0), Builder.getInt64(Index)});
  Value *ParamTyped = Builder.CreatePointerCast(Param, Builder.getInt8PtrTy());
  Builder.CreateStore(ParamTyped, Slot);
}
Value *
GPUNodeBuilder::createLaunchParameters(ppcg_kernel *Kernel, Function *F,
                                       SetVector<Value *> SubtreeValues) {
  const int NumArgs = F->arg_size();
  std::vector<int> ArgSizes(NumArgs);

  // If we are using the OpenCL Runtime, we need to add the kernel argument
  // sizes to the end of the launch-parameter list, so OpenCL can determine
  // how big the respective kernel arguments are.
  // Here we need to reserve adequate space for that.
  Type *ArrayTy;
  if (Runtime == GPURuntime::OpenCL)
    ArrayTy = ArrayType::get(Builder.getInt8PtrTy(), 2 * NumArgs);
  else
    ArrayTy = ArrayType::get(Builder.getInt8PtrTy(), NumArgs);

  BasicBlock *EntryBlock =
      &Builder.GetInsertBlock()->getParent()->getEntryBlock();
  auto AddressSpace = F->getParent()->getDataLayout().getAllocaAddrSpace();
  std::string Launch = "polly_launch_" + std::to_string(Kernel->id);
  Instruction *Parameters = new AllocaInst(
      ArrayTy, AddressSpace, Launch + "_params", EntryBlock->getTerminator());

  int Index = 0;
  for (long i = 0; i < Prog->n_array; i++) {
    if (!ppcg_kernel_requires_array_argument(Kernel, i))
      continue;

    isl_id *Id = isl_space_get_tuple_id(Prog->array[i].space, isl_dim_set);
    const ScopArrayInfo *SAI = ScopArrayInfo::getFromId(isl::manage(Id));

    if (Runtime == GPURuntime::OpenCL)
      ArgSizes[Index] = SAI->getElemSizeInBytes();

    Value *DevArray = nullptr;
    if (PollyManagedMemory) {
      DevArray = getManagedDeviceArray(&Prog->array[i],
                                       const_cast<ScopArrayInfo *>(SAI));
    } else {
      DevArray = DeviceAllocations[const_cast<ScopArrayInfo *>(SAI)];
      DevArray = createCallGetDevicePtr(DevArray);
    }
    assert(DevArray != nullptr && "Array to be offloaded to device not "
                                  "initialized");
    Value *Offset = getArrayOffset(&Prog->array[i]);

    if (Offset) {
      DevArray = Builder.CreatePointerCast(
          DevArray, SAI->getElementType()->getPointerTo());
      DevArray = Builder.CreateGEP(DevArray, Builder.CreateNeg(Offset));
      DevArray = Builder.CreatePointerCast(DevArray, Builder.getInt8PtrTy());
    }
    Value *Slot = Builder.CreateGEP(
        Parameters, {Builder.getInt64(0), Builder.getInt64(Index)});

    if (gpu_array_is_read_only_scalar(&Prog->array[i])) {
      Value *ValPtr = nullptr;
      if (PollyManagedMemory)
        ValPtr = DevArray;
      else
        ValPtr = BlockGen.getOrCreateAlloca(SAI);

      assert(ValPtr != nullptr && "ValPtr that should point to a valid object"
                                  " to be stored into Parameters");
      Value *ValPtrCast =
          Builder.CreatePointerCast(ValPtr, Builder.getInt8PtrTy());
      Builder.CreateStore(ValPtrCast, Slot);
    } else {
      Instruction *Param =
          new AllocaInst(Builder.getInt8PtrTy(), AddressSpace,
                         Launch + "_param_" + std::to_string(Index),
                         EntryBlock->getTerminator());
      Builder.CreateStore(DevArray, Param);
      Value *ParamTyped =
          Builder.CreatePointerCast(Param, Builder.getInt8PtrTy());
      Builder.CreateStore(ParamTyped, Slot);
    }
    Index++;
  }

  int NumHostIters = isl_space_dim(Kernel->space, isl_dim_set);

  for (long i = 0; i < NumHostIters; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_set, i);
    Value *Val = IDToValue[Id];
    isl_id_free(Id);

    if (Runtime == GPURuntime::OpenCL)
      ArgSizes[Index] = computeSizeInBytes(Val->getType());

    Instruction *Param =
        new AllocaInst(Val->getType(), AddressSpace,
                       Launch + "_param_" + std::to_string(Index),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  int NumVars = isl_space_dim(Kernel->space, isl_dim_param);

  for (long i = 0; i < NumVars; i++) {
    isl_id *Id = isl_space_get_dim_id(Kernel->space, isl_dim_param, i);
    Value *Val = IDToValue[Id];
    if (ValueMap.count(Val))
      Val = ValueMap[Val];
    isl_id_free(Id);

    if (Runtime == GPURuntime::OpenCL)
      ArgSizes[Index] = computeSizeInBytes(Val->getType());

    Instruction *Param =
        new AllocaInst(Val->getType(), AddressSpace,
                       Launch + "_param_" + std::to_string(Index),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  for (auto Val : SubtreeValues) {
    if (Runtime == GPURuntime::OpenCL)
      ArgSizes[Index] = computeSizeInBytes(Val->getType());

    Instruction *Param =
        new AllocaInst(Val->getType(), AddressSpace,
                       Launch + "_param_" + std::to_string(Index),
                       EntryBlock->getTerminator());
    Builder.CreateStore(Val, Param);
    insertStoreParameter(Parameters, Param, Index);
    Index++;
  }

  if (Runtime == GPURuntime::OpenCL) {
    for (int i = 0; i < NumArgs; i++) {
      Value *Val = ConstantInt::get(Builder.getInt32Ty(), ArgSizes[i]);
      Instruction *Param =
          new AllocaInst(Builder.getInt32Ty(), AddressSpace,
                         Launch + "_param_size_" + std::to_string(i),
                         EntryBlock->getTerminator());
      Builder.CreateStore(Val, Param);
      insertStoreParameter(Parameters
, Param
, Index
);
1757 auto Location
= EntryBlock
->getTerminator();
1758 return new BitCastInst(Parameters
, Builder
.getInt8PtrTy(),
1759 Launch
+ "_params_i8ptr", Location
);
void GPUNodeBuilder::setupKernelSubtreeFunctions(
    SetVector<Function *> SubtreeFunctions) {
  for (auto Fn : SubtreeFunctions) {
    const std::string ClonedFnName = Fn->getName().str();
    Function *Clone = GPUModule->getFunction(ClonedFnName);
    if (!Clone)
      Clone =
          Function::Create(Fn->getFunctionType(), GlobalValue::ExternalLinkage,
                           ClonedFnName, GPUModule.get());
    assert(Clone && "Expected cloned function to be initialized.");
    assert(ValueMap.find(Fn) == ValueMap.end() &&
           "Fn already present in ValueMap");
    ValueMap[Fn] = Clone;
  }
}
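// Generate a GPU kernel for the AST node annotated with a ppcg_kernel: create
// the kernel function in a fresh GPU module, generate its body, and emit the
// host-side code that sets up the parameters and launches the kernel.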
1777 void GPUNodeBuilder::createKernel(__isl_take isl_ast_node
*KernelStmt
) {
1778 isl_id
*Id
= isl_ast_node_get_annotation(KernelStmt
);
1779 ppcg_kernel
*Kernel
= (ppcg_kernel
*)isl_id_get_user(Id
);
1781 isl_ast_node_free(KernelStmt
);
1783 if (Kernel
->n_grid
> 1)
1784 DeepestParallel
= std::max(
1785 DeepestParallel
, (unsigned)isl_space_dim(Kernel
->space
, isl_dim_set
));
1787 DeepestSequential
= std::max(
1788 DeepestSequential
, (unsigned)isl_space_dim(Kernel
->space
, isl_dim_set
));
1790 Value
*BlockDimX
, *BlockDimY
, *BlockDimZ
;
1791 std::tie(BlockDimX
, BlockDimY
, BlockDimZ
) = getBlockSizes(Kernel
);
1793 SetVector
<Value
*> SubtreeValues
;
1794 SetVector
<Function
*> SubtreeFunctions
;
1795 SetVector
<const Loop
*> Loops
;
1796 isl::space ParamSpace
;
1797 std::tie(SubtreeValues
, SubtreeFunctions
, Loops
, ParamSpace
) =
1798 getReferencesInKernel(Kernel
);
1800 // Add parameters that appear only in the access function to the kernel
1801 // space. This is important to make sure that all isl_ids are passed as
1802 // parameters to the kernel, even though we may not have all parameters
1803 // in the context to improve compile time.
1804 Kernel
->space
= isl_space_align_params(Kernel
->space
, ParamSpace
.release());
1806 assert(Kernel
->tree
&& "Device AST of kernel node is empty");
1808 Instruction
&HostInsertPoint
= *Builder
.GetInsertPoint();
1809 IslExprBuilder::IDToValueTy HostIDs
= IDToValue
;
1810 ValueMapT HostValueMap
= ValueMap
;
1811 BlockGenerator::AllocaMapTy HostScalarMap
= ScalarMap
;
1813 BlockGenerator::EscapeUsersAllocaMapTy HostEscapeMap
= EscapeMap
;
1816 // Create for all loops we depend on values that contain the current loop
1817 // iteration. These values are necessary to generate code for SCEVs that
1818 // depend on such loops. As a result we need to pass them to the subfunction.
1819 for (const Loop
*L
: Loops
) {
1820 const SCEV
*OuterLIV
= SE
.getAddRecExpr(SE
.getUnknown(Builder
.getInt64(0)),
1821 SE
.getUnknown(Builder
.getInt64(1)),
1822 L
, SCEV::FlagAnyWrap
);
1823 Value
*V
= generateSCEV(OuterLIV
);
1824 OutsideLoopIterations
[L
] = SE
.getUnknown(V
);
1825 SubtreeValues
.insert(V
);
1828 createKernelFunction(Kernel
, SubtreeValues
, SubtreeFunctions
);
1829 setupKernelSubtreeFunctions(SubtreeFunctions
);
1831 create(isl_ast_node_copy(Kernel
->tree
));
1833 finalizeKernelArguments(Kernel
);
1834 Function
*F
= Builder
.GetInsertBlock()->getParent();
1835 if (Arch
== GPUArch::NVPTX64
)
1836 addCUDAAnnotations(F
->getParent(), BlockDimX
, BlockDimY
, BlockDimZ
);
1838 clearScalarEvolution(F
);
1841 IDToValue
= HostIDs
;
1843 ValueMap
= std::move(HostValueMap
);
1844 ScalarMap
= std::move(HostScalarMap
);
1845 EscapeMap
= std::move(HostEscapeMap
);
1847 Annotator
.resetAlternativeAliasBases();
1848 for (auto &BasePtr
: LocalArrays
)
1849 S
.invalidateScopArrayInfo(BasePtr
, MemoryKind::Array
);
1850 LocalArrays
.clear();
1852 std::string ASMString
= finalizeKernelFunction();
1853 Builder
.SetInsertPoint(&HostInsertPoint
);
1854 Value
*Parameters
= createLaunchParameters(Kernel
, F
, SubtreeValues
);
1856 std::string Name
= getKernelFuncName(Kernel
->id
);
1857 Value
*KernelString
= Builder
.CreateGlobalStringPtr(ASMString
, Name
);
1858 Value
*NameString
= Builder
.CreateGlobalStringPtr(Name
, Name
+ "_name");
1859 Value
*GPUKernel
= createCallGetKernel(KernelString
, NameString
);
1861 Value
*GridDimX
, *GridDimY
;
1862 std::tie(GridDimX
, GridDimY
) = getGridSizes(Kernel
);
1864 createCallLaunchKernel(GPUKernel
, GridDimX
, GridDimY
, BlockDimX
, BlockDimY
,
1865 BlockDimZ
, Parameters
);
1866 createCallFreeKernel(GPUKernel
);
1868 for (auto Id
: KernelIds
)
/// Compute the DataLayout string for the NVPTX backend.
///
/// @param is64Bit Are we looking for a 64 bit architecture?
static std::string computeNVPTXDataLayout(bool is64Bit) {
  std::string Ret = "";

  if (!is64Bit) {
    Ret += "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:"
           "64-v128:128:128-n16:32:64";
  } else {
    Ret += "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v32:32:32-v64:64:"
           "64-v128:128:128-n16:32:64";
  }

  return Ret;
}

/// Compute the DataLayout string for a SPIR kernel.
///
/// @param is64Bit Are we looking for a 64 bit architecture?
static std::string computeSPIRDataLayout(bool is64Bit) {
  std::string Ret = "";

  if (!is64Bit) {
    Ret += "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:"
           "32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:"
           "256:256-v256:256:256-v512:512:512-v1024:1024:1024";
  } else {
    Ret += "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:"
           "64-i128:128:128-f32:32:32-f64:64:64-v16:16:16-v24:32:32-v32:32:"
           "32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:"
           "256:256-v256:256:256-v512:512:512-v1024:1024:1024";
  }

  return Ret;
}
1915 GPUNodeBuilder::createKernelFunctionDecl(ppcg_kernel
*Kernel
,
1916 SetVector
<Value
*> &SubtreeValues
) {
1917 std::vector
<Type
*> Args
;
1918 std::string Identifier
= getKernelFuncName(Kernel
->id
);
1920 std::vector
<Metadata
*> MemoryType
;
1922 for (long i
= 0; i
< Prog
->n_array
; i
++) {
1923 if (!ppcg_kernel_requires_array_argument(Kernel
, i
))
1926 if (gpu_array_is_read_only_scalar(&Prog
->array
[i
])) {
1927 isl_id
*Id
= isl_space_get_tuple_id(Prog
->array
[i
].space
, isl_dim_set
);
1928 const ScopArrayInfo
*SAI
= ScopArrayInfo::getFromId(isl::manage(Id
));
1929 Args
.push_back(SAI
->getElementType());
1930 MemoryType
.push_back(
1931 ConstantAsMetadata::get(ConstantInt::get(Builder
.getInt32Ty(), 0)));
1933 static const int UseGlobalMemory
= 1;
1934 Args
.push_back(Builder
.getInt8PtrTy(UseGlobalMemory
));
1935 MemoryType
.push_back(
1936 ConstantAsMetadata::get(ConstantInt::get(Builder
.getInt32Ty(), 1)));
1940 int NumHostIters
= isl_space_dim(Kernel
->space
, isl_dim_set
);
1942 for (long i
= 0; i
< NumHostIters
; i
++) {
1943 Args
.push_back(Builder
.getInt64Ty());
1944 MemoryType
.push_back(
1945 ConstantAsMetadata::get(ConstantInt::get(Builder
.getInt32Ty(), 0)));
1948 int NumVars
= isl_space_dim(Kernel
->space
, isl_dim_param
);
1950 for (long i
= 0; i
< NumVars
; i
++) {
1951 isl_id
*Id
= isl_space_get_dim_id(Kernel
->space
, isl_dim_param
, i
);
1952 Value
*Val
= IDToValue
[Id
];
1954 Args
.push_back(Val
->getType());
1955 MemoryType
.push_back(
1956 ConstantAsMetadata::get(ConstantInt::get(Builder
.getInt32Ty(), 0)));
1959 for (auto *V
: SubtreeValues
) {
1960 Args
.push_back(V
->getType());
1961 MemoryType
.push_back(
1962 ConstantAsMetadata::get(ConstantInt::get(Builder
.getInt32Ty(), 0)));
1965 auto *FT
= FunctionType::get(Builder
.getVoidTy(), Args
, false);
1966 auto *FN
= Function::Create(FT
, Function::ExternalLinkage
, Identifier
,
1969 std::vector
<Metadata
*> EmptyStrings
;
1971 for (unsigned int i
= 0; i
< MemoryType
.size(); i
++) {
1972 EmptyStrings
.push_back(MDString::get(FN
->getContext(), ""));
1975 if (Arch
== GPUArch::SPIR32
|| Arch
== GPUArch::SPIR64
) {
1976 FN
->setMetadata("kernel_arg_addr_space",
1977 MDNode::get(FN
->getContext(), MemoryType
));
1978 FN
->setMetadata("kernel_arg_name",
1979 MDNode::get(FN
->getContext(), EmptyStrings
));
1980 FN
->setMetadata("kernel_arg_access_qual",
1981 MDNode::get(FN
->getContext(), EmptyStrings
));
1982 FN
->setMetadata("kernel_arg_type",
1983 MDNode::get(FN
->getContext(), EmptyStrings
));
1984 FN
->setMetadata("kernel_arg_type_qual",
1985 MDNode::get(FN
->getContext(), EmptyStrings
));
1986 FN
->setMetadata("kernel_arg_base_type",
1987 MDNode::get(FN
->getContext(), EmptyStrings
));
1991 case GPUArch::NVPTX64
:
1992 FN
->setCallingConv(CallingConv::PTX_Kernel
);
1994 case GPUArch::SPIR32
:
1995 case GPUArch::SPIR64
:
1996 FN
->setCallingConv(CallingConv::SPIR_KERNEL
);
2000 auto Arg
= FN
->arg_begin();
2001 for (long i
= 0; i
< Kernel
->n_array
; i
++) {
2002 if (!ppcg_kernel_requires_array_argument(Kernel
, i
))
2005 Arg
->setName(Kernel
->array
[i
].array
->name
);
2007 isl_id
*Id
= isl_space_get_tuple_id(Prog
->array
[i
].space
, isl_dim_set
);
2008 const ScopArrayInfo
*SAI
= ScopArrayInfo::getFromId(isl::manage_copy(Id
));
2009 Type
*EleTy
= SAI
->getElementType();
2011 SmallVector
<const SCEV
*, 4> Sizes
;
2012 isl_ast_build
*Build
=
2013 isl_ast_build_from_context(isl_set_copy(Prog
->context
));
2014 Sizes
.push_back(nullptr);
2015 for (long j
= 1, n
= Kernel
->array
[i
].array
->n_index
; j
< n
; j
++) {
2016 isl_ast_expr
*DimSize
= isl_ast_build_expr_from_pw_aff(
2017 Build
, isl_multi_pw_aff_get_pw_aff(Kernel
->array
[i
].array
->bound
, j
));
2018 auto V
= ExprBuilder
.create(DimSize
);
2019 Sizes
.push_back(SE
.getSCEV(V
));
2021 const ScopArrayInfo
*SAIRep
=
2022 S
.getOrCreateScopArrayInfo(Val
, EleTy
, Sizes
, MemoryKind::Array
);
2023 LocalArrays
.push_back(Val
);
2025 isl_ast_build_free(Build
);
2026 KernelIds
.push_back(Id
);
2027 IDToSAI
[Id
] = SAIRep
;
2031 for (long i
= 0; i
< NumHostIters
; i
++) {
2032 isl_id
*Id
= isl_space_get_dim_id(Kernel
->space
, isl_dim_set
, i
);
2033 Arg
->setName(isl_id_get_name(Id
));
2034 IDToValue
[Id
] = &*Arg
;
2035 KernelIDs
.insert(std::unique_ptr
<isl_id
, IslIdDeleter
>(Id
));
2039 for (long i
= 0; i
< NumVars
; i
++) {
2040 isl_id
*Id
= isl_space_get_dim_id(Kernel
->space
, isl_dim_param
, i
);
2041 Arg
->setName(isl_id_get_name(Id
));
2042 Value
*Val
= IDToValue
[Id
];
2043 ValueMap
[Val
] = &*Arg
;
2044 IDToValue
[Id
] = &*Arg
;
2045 KernelIDs
.insert(std::unique_ptr
<isl_id
, IslIdDeleter
>(Id
));
2049 for (auto *V
: SubtreeValues
) {
2050 Arg
->setName(V
->getName());
2051 ValueMap
[V
] = &*Arg
;
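// Bind the kernel's block and thread identifiers to the corresponding NVVM
// position intrinsics (ctaid.{x,y} and tid.{x,y,z}), so that references to
// these isl ids in the kernel body resolve to the hardware indices.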
void GPUNodeBuilder::insertKernelIntrinsics(ppcg_kernel *Kernel) {
  Intrinsic::ID IntrinsicsBID[2];
  Intrinsic::ID IntrinsicsTID[3];

  switch (Arch) {
  case GPUArch::SPIR64:
  case GPUArch::SPIR32:
    llvm_unreachable("Cannot generate NVVM intrinsics for SPIR");
  case GPUArch::NVPTX64:
    IntrinsicsBID[0] = Intrinsic::nvvm_read_ptx_sreg_ctaid_x;
    IntrinsicsBID[1] = Intrinsic::nvvm_read_ptx_sreg_ctaid_y;

    IntrinsicsTID[0] = Intrinsic::nvvm_read_ptx_sreg_tid_x;
    IntrinsicsTID[1] = Intrinsic::nvvm_read_ptx_sreg_tid_y;
    IntrinsicsTID[2] = Intrinsic::nvvm_read_ptx_sreg_tid_z;
    break;
  }

  auto addId = [this](__isl_take isl_id *Id, Intrinsic::ID Intr) mutable {
    std::string Name = isl_id_get_name(Id);
    Module *M = Builder.GetInsertBlock()->getParent()->getParent();
    Function *IntrinsicFn = Intrinsic::getDeclaration(M, Intr);
    Value *Val = Builder.CreateCall(IntrinsicFn, {});
    Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name);
    IDToValue[Id] = Val;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
  };

  for (int i = 0; i < Kernel->n_grid; ++i) {
    isl_id *Id = isl_id_list_get_id(Kernel->block_ids, i);
    addId(Id, IntrinsicsBID[i]);
  }

  for (int i = 0; i < Kernel->n_block; ++i) {
    isl_id *Id = isl_id_list_get_id(Kernel->thread_ids, i);
    addId(Id, IntrinsicsTID[i]);
  }
}
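// For SPIR kernels the block and thread identifiers are obtained through
// calls to runtime functions (__gen_ocl_get_group_id*/__gen_ocl_get_local_id*)
// instead of NVVM intrinsics.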
void GPUNodeBuilder::insertKernelCallsSPIR(ppcg_kernel *Kernel,
                                           bool SizeTypeIs64bit) {
  const char *GroupName[3] = {"__gen_ocl_get_group_id0",
                              "__gen_ocl_get_group_id1",
                              "__gen_ocl_get_group_id2"};

  const char *LocalName[3] = {"__gen_ocl_get_local_id0",
                              "__gen_ocl_get_local_id1",
                              "__gen_ocl_get_local_id2"};
  IntegerType *SizeT =
      SizeTypeIs64bit ? Builder.getInt64Ty() : Builder.getInt32Ty();

  auto createFunc = [this](const char *Name, __isl_take isl_id *Id,
                           IntegerType *SizeT) mutable {
    Module *M = Builder.GetInsertBlock()->getParent()->getParent();
    Function *FN = M->getFunction(Name);

    // If FN is not available, declare it.
    if (!FN) {
      GlobalValue::LinkageTypes Linkage = Function::ExternalLinkage;
      std::vector<Type *> Args;
      FunctionType *Ty = FunctionType::get(SizeT, Args, false);
      FN = Function::Create(Ty, Linkage, Name, M);
      FN->setCallingConv(CallingConv::SPIR_FUNC);
    }

    Value *Val = Builder.CreateCall(FN, {});
    if (SizeT == Builder.getInt32Ty())
      Val = Builder.CreateIntCast(Val, Builder.getInt64Ty(), false, Name);
    IDToValue[Id] = Val;
    KernelIDs.insert(std::unique_ptr<isl_id, IslIdDeleter>(Id));
  };

  for (int i = 0; i < Kernel->n_grid; ++i)
    createFunc(GroupName[i], isl_id_list_get_id(Kernel->block_ids, i), SizeT);

  for (int i = 0; i < Kernel->n_block; ++i)
    createFunc(LocalName[i], isl_id_list_get_id(Kernel->thread_ids, i), SizeT);
}
2137 void GPUNodeBuilder::prepareKernelArguments(ppcg_kernel
*Kernel
, Function
*FN
) {
2138 auto Arg
= FN
->arg_begin();
2139 for (long i
= 0; i
< Kernel
->n_array
; i
++) {
2140 if (!ppcg_kernel_requires_array_argument(Kernel
, i
))
2143 isl_id
*Id
= isl_space_get_tuple_id(Prog
->array
[i
].space
, isl_dim_set
);
2144 const ScopArrayInfo
*SAI
= ScopArrayInfo::getFromId(isl::manage_copy(Id
));
2147 if (SAI
->getNumberOfDimensions() > 0) {
2154 if (!gpu_array_is_read_only_scalar(&Prog
->array
[i
])) {
2155 Type
*TypePtr
= SAI
->getElementType()->getPointerTo();
2156 Value
*TypedArgPtr
= Builder
.CreatePointerCast(Val
, TypePtr
);
2157 Val
= Builder
.CreateLoad(TypedArgPtr
);
2160 Value
*Alloca
= BlockGen
.getOrCreateAlloca(SAI
);
2161 Builder
.CreateStore(Val
, Alloca
);
2167 void GPUNodeBuilder::finalizeKernelArguments(ppcg_kernel
*Kernel
) {
2168 auto *FN
= Builder
.GetInsertBlock()->getParent();
2169 auto Arg
= FN
->arg_begin();
2171 bool StoredScalar
= false;
2172 for (long i
= 0; i
< Kernel
->n_array
; i
++) {
2173 if (!ppcg_kernel_requires_array_argument(Kernel
, i
))
2176 isl_id
*Id
= isl_space_get_tuple_id(Prog
->array
[i
].space
, isl_dim_set
);
2177 const ScopArrayInfo
*SAI
= ScopArrayInfo::getFromId(isl::manage_copy(Id
));
2180 if (SAI
->getNumberOfDimensions() > 0) {
2185 if (gpu_array_is_read_only_scalar(&Prog
->array
[i
])) {
2190 Value
*Alloca
= BlockGen
.getOrCreateAlloca(SAI
);
2191 Value
*ArgPtr
= &*Arg
;
2192 Type
*TypePtr
= SAI
->getElementType()->getPointerTo();
2193 Value
*TypedArgPtr
= Builder
.CreatePointerCast(ArgPtr
, TypePtr
);
2194 Value
*Val
= Builder
.CreateLoad(Alloca
);
2195 Builder
.CreateStore(Val
, TypedArgPtr
);
2196 StoredScalar
= true;
2202 /// In case more than one thread contains scalar stores, the generated
2203 /// code might be incorrect, if we only store at the end of the kernel.
2204 /// To support this case we need to store these scalars back at each
2205 /// memory store or at least before each kernel barrier.
2206 if (Kernel
->n_block
!= 0 || Kernel
->n_grid
!= 0) {
2207 BuildSuccessful
= 0;
2209 dbgs() << getUniqueScopName(&S
)
2210 << " has a store to a scalar value that"
2211 " would be undefined to run in parallel. Bailing out.\n";);
2216 void GPUNodeBuilder::createKernelVariables(ppcg_kernel
*Kernel
, Function
*FN
) {
2217 Module
*M
= Builder
.GetInsertBlock()->getParent()->getParent();
2219 for (int i
= 0; i
< Kernel
->n_var
; ++i
) {
2220 struct ppcg_kernel_var
&Var
= Kernel
->var
[i
];
2221 isl_id
*Id
= isl_space_get_tuple_id(Var
.array
->space
, isl_dim_set
);
2222 Type
*EleTy
= ScopArrayInfo::getFromId(isl::manage(Id
))->getElementType();
2224 Type
*ArrayTy
= EleTy
;
2225 SmallVector
<const SCEV
*, 4> Sizes
;
2227 Sizes
.push_back(nullptr);
2228 for (unsigned int j
= 1; j
< Var
.array
->n_index
; ++j
) {
2229 isl_val
*Val
= isl_vec_get_element_val(Var
.size
, j
);
2230 long Bound
= isl_val_get_num_si(Val
);
2232 Sizes
.push_back(S
.getSE()->getConstant(Builder
.getInt64Ty(), Bound
));
2235 for (int j
= Var
.array
->n_index
- 1; j
>= 0; --j
) {
2236 isl_val
*Val
= isl_vec_get_element_val(Var
.size
, j
);
2237 long Bound
= isl_val_get_num_si(Val
);
2239 ArrayTy
= ArrayType::get(ArrayTy
, Bound
);
2242 const ScopArrayInfo
*SAI
;
2244 if (Var
.type
== ppcg_access_shared
) {
2245 auto GlobalVar
= new GlobalVariable(
2246 *M
, ArrayTy
, false, GlobalValue::InternalLinkage
, 0, Var
.name
,
2247 nullptr, GlobalValue::ThreadLocalMode::NotThreadLocal
, 3);
2248 GlobalVar
->setAlignment(llvm::Align(EleTy
->getPrimitiveSizeInBits() / 8));
2249 GlobalVar
->setInitializer(Constant::getNullValue(ArrayTy
));
2251 Allocation
= GlobalVar
;
2252 } else if (Var
.type
== ppcg_access_private
) {
2253 Allocation
= Builder
.CreateAlloca(ArrayTy
, 0, "private_array");
2255 llvm_unreachable("unknown variable type");
2258 S
.getOrCreateScopArrayInfo(Allocation
, EleTy
, Sizes
, MemoryKind::Array
);
2259 Id
= isl_id_alloc(S
.getIslCtx().get(), Var
.name
, nullptr);
2260 IDToValue
[Id
] = Allocation
;
2261 LocalArrays
.push_back(Allocation
);
2262 KernelIds
.push_back(Id
);
2267 void GPUNodeBuilder::createKernelFunction(
2268 ppcg_kernel
*Kernel
, SetVector
<Value
*> &SubtreeValues
,
2269 SetVector
<Function
*> &SubtreeFunctions
) {
2270 std::string Identifier
= getKernelFuncName(Kernel
->id
);
2271 GPUModule
.reset(new Module(Identifier
, Builder
.getContext()));
2274 case GPUArch::NVPTX64
:
2275 if (Runtime
== GPURuntime::CUDA
)
2276 GPUModule
->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda"));
2277 else if (Runtime
== GPURuntime::OpenCL
)
2278 GPUModule
->setTargetTriple(Triple::normalize("nvptx64-nvidia-nvcl"));
2279 GPUModule
->setDataLayout(computeNVPTXDataLayout(true /* is64Bit */));
2281 case GPUArch::SPIR32
:
2282 GPUModule
->setTargetTriple(Triple::normalize("spir-unknown-unknown"));
2283 GPUModule
->setDataLayout(computeSPIRDataLayout(false /* is64Bit */));
2285 case GPUArch::SPIR64
:
2286 GPUModule
->setTargetTriple(Triple::normalize("spir64-unknown-unknown"));
2287 GPUModule
->setDataLayout(computeSPIRDataLayout(true /* is64Bit */));
2291 Function
*FN
= createKernelFunctionDecl(Kernel
, SubtreeValues
);
2293 BasicBlock
*PrevBlock
= Builder
.GetInsertBlock();
2294 auto EntryBlock
= BasicBlock::Create(Builder
.getContext(), "entry", FN
);
2296 DT
.addNewBlock(EntryBlock
, PrevBlock
);
2298 Builder
.SetInsertPoint(EntryBlock
);
2299 Builder
.CreateRetVoid();
2300 Builder
.SetInsertPoint(EntryBlock
, EntryBlock
->begin());
2302 ScopDetection::markFunctionAsInvalid(FN
);
2304 prepareKernelArguments(Kernel
, FN
);
2305 createKernelVariables(Kernel
, FN
);
2308 case GPUArch::NVPTX64
:
2309 insertKernelIntrinsics(Kernel
);
2311 case GPUArch::SPIR32
:
2312 insertKernelCallsSPIR(Kernel
, false);
2314 case GPUArch::SPIR64
:
2315 insertKernelCallsSPIR(Kernel
, true);
2320 std::string
GPUNodeBuilder::createKernelASM() {
2321 llvm::Triple GPUTriple
;
2324 case GPUArch::NVPTX64
:
2326 case GPURuntime::CUDA
:
2327 GPUTriple
= llvm::Triple(Triple::normalize("nvptx64-nvidia-cuda"));
2329 case GPURuntime::OpenCL
:
2330 GPUTriple
= llvm::Triple(Triple::normalize("nvptx64-nvidia-nvcl"));
2334 case GPUArch::SPIR64
:
2335 case GPUArch::SPIR32
:
2336 std::string SPIRAssembly
;
2337 raw_string_ostream
IROstream(SPIRAssembly
);
2338 IROstream
<< *GPUModule
;
2340 return SPIRAssembly
;
2344 auto GPUTarget
= TargetRegistry::lookupTarget(GPUTriple
.getTriple(), ErrMsg
);
2347 errs() << ErrMsg
<< "\n";
2351 TargetOptions Options
;
2352 Options
.UnsafeFPMath
= FastMath
;
2354 std::string subtarget
;
2357 case GPUArch::NVPTX64
:
2358 subtarget
= CudaVersion
;
2360 case GPUArch::SPIR32
:
2361 case GPUArch::SPIR64
:
2362 llvm_unreachable("No subtarget for SPIR architecture");
2365 std::unique_ptr
<TargetMachine
> TargetM(GPUTarget
->createTargetMachine(
2366 GPUTriple
.getTriple(), subtarget
, "", Options
, Optional
<Reloc::Model
>()));
2368 SmallString
<0> ASMString
;
2369 raw_svector_ostream
ASMStream(ASMString
);
2370 llvm::legacy::PassManager PM
;
2372 PM
.add(createTargetTransformInfoWrapperPass(TargetM
->getTargetIRAnalysis()));
2374 if (TargetM
->addPassesToEmitFile(PM
, ASMStream
, nullptr, CGFT_AssemblyFile
,
2375 true /* verify */)) {
2376 errs() << "The target does not support generation of this file type!\n";
2382 return ASMStream
.str().str();
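// Check whether the generated GPU module references math functions that are
// only available in NVIDIA's libdevice, and rewrite such declarations to
// their __nv_* libdevice names if so.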
bool GPUNodeBuilder::requiresCUDALibDevice() {
  bool RequiresLibDevice = false;
  for (Function &F : GPUModule->functions()) {
    if (!F.isDeclaration())
      continue;

    const std::string CUDALibDeviceFunc = getCUDALibDeviceFuntion(F.getName());
    if (CUDALibDeviceFunc.length() != 0) {
      // We need to handle the case where a module looks like this:
      //   @expf(..)
      //   @llvm.exp.f64(..)
      // Both of these functions would be renamed to `__nv_expf`.
      //
      // So, we must first check for the existence of the libdevice function.
      // If this exists, we replace our current function with it.
      //
      // If it does not exist, we rename the current function to the
      // libdevice function name.
      if (Function *Replacement = F.getParent()->getFunction(CUDALibDeviceFunc))
        F.replaceAllUsesWith(Replacement);
      else
        F.setName(CUDALibDeviceFunc);
      RequiresLibDevice = true;
    }
  }

  return RequiresLibDevice;
}
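// Link NVIDIA's libdevice bitcode into the GPU module when it is required,
// so that the renamed math functions have definitions available when the
// PTX assembly is generated.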
void GPUNodeBuilder::addCUDALibDevice() {
  if (Arch != GPUArch::NVPTX64)
    return;

  if (requiresCUDALibDevice()) {
    SMDiagnostic Error;

    errs() << CUDALibDevice << "\n";
    auto LibDeviceModule =
        parseIRFile(CUDALibDevice, Error, GPUModule->getContext());

    if (!LibDeviceModule) {
      BuildSuccessful = false;
      report_fatal_error("Could not find or load libdevice. Skipping GPU "
                         "kernel generation. Please set -polly-acc-libdevice "
                         "accordingly.\n");
      return;
    }

    Linker L(*GPUModule);

    // Set an nvptx64 target triple to avoid linker warnings. The original
    // triple of the libdevice files is nvptx-unknown-unknown.
    LibDeviceModule->setTargetTriple(Triple::normalize("nvptx64-nvidia-cuda"));
    L.linkInModule(std::move(LibDeviceModule), Linker::LinkOnlyNeeded);
  }
}
2442 std::string
GPUNodeBuilder::finalizeKernelFunction() {
2444 if (verifyModule(*GPUModule
)) {
2445 LLVM_DEBUG(dbgs() << "verifyModule failed on module:\n";
2446 GPUModule
->print(dbgs(), nullptr); dbgs() << "\n";);
2447 LLVM_DEBUG(dbgs() << "verifyModule Error:\n";
2448 verifyModule(*GPUModule
, &dbgs()););
2450 if (FailOnVerifyModuleFailure
)
2451 llvm_unreachable("VerifyModule failed.");
2453 BuildSuccessful
= false;
2460 outs() << *GPUModule
<< "\n";
2462 if (Arch
!= GPUArch::SPIR32
&& Arch
!= GPUArch::SPIR64
) {
2464 llvm::legacy::PassManager OptPasses
;
2465 PassManagerBuilder PassBuilder
;
2466 PassBuilder
.OptLevel
= 3;
2467 PassBuilder
.SizeLevel
= 0;
2468 PassBuilder
.populateModulePassManager(OptPasses
);
2469 OptPasses
.run(*GPUModule
);
2472 std::string Assembly
= createKernelASM();
2475 outs() << Assembly
<< "\n";
2477 GPUModule
.release();
2482 /// Construct an `isl_pw_aff_list` from a vector of `isl_pw_aff`
2483 /// @param PwAffs The list of piecewise affine functions to create an
2484 /// `isl_pw_aff_list` from. We expect an rvalue ref because
2485 /// all the isl_pw_aff are used up by this function.
2487 /// @returns The `isl_pw_aff_list`.
2488 __isl_give isl_pw_aff_list
*
2489 createPwAffList(isl_ctx
*Context
,
2490 const std::vector
<__isl_take isl_pw_aff
*> &&PwAffs
) {
2491 isl_pw_aff_list
*List
= isl_pw_aff_list_alloc(Context
, PwAffs
.size());
2493 for (unsigned i
= 0; i
< PwAffs
.size(); i
++) {
2494 List
= isl_pw_aff_list_insert(List
, i
, PwAffs
[i
]);
2499 /// Align all the `PwAffs` such that they have the same parameter dimensions.
2501 /// We loop over all `pw_aff` and align all of their spaces together to
2502 /// create a common space for all the `pw_aff`. This common space is the
2503 /// `AlignSpace`. We then align all the `pw_aff` to this space. We start
2504 /// with the given `SeedSpace`.
2505 /// @param PwAffs The list of piecewise affine functions we want to align.
2506 /// This is an rvalue reference because the entire vector is
2507 /// used up by the end of the operation.
2508 /// @param SeedSpace The space to start the alignment process with.
2509 /// @returns A std::pair, whose first element is the aligned space,
2510 /// whose second element is the vector of aligned piecewise
2512 static std::pair
<__isl_give isl_space
*, std::vector
<__isl_give isl_pw_aff
*>>
2513 alignPwAffs(const std::vector
<__isl_take isl_pw_aff
*> &&PwAffs
,
2514 __isl_take isl_space
*SeedSpace
) {
2515 assert(SeedSpace
&& "Invalid seed space given.");
2517 isl_space
*AlignSpace
= SeedSpace
;
2518 for (isl_pw_aff
*PwAff
: PwAffs
) {
2519 isl_space
*PwAffSpace
= isl_pw_aff_get_domain_space(PwAff
);
2520 AlignSpace
= isl_space_align_params(AlignSpace
, PwAffSpace
);
2522 std::vector
<isl_pw_aff
*> AdjustedPwAffs
;
2524 for (unsigned i
= 0; i
< PwAffs
.size(); i
++) {
2525 isl_pw_aff
*Adjusted
= PwAffs
[i
];
2526 assert(Adjusted
&& "Invalid pw_aff given.");
2527 Adjusted
= isl_pw_aff_align_params(Adjusted
, isl_space_copy(AlignSpace
));
2528 AdjustedPwAffs
.push_back(Adjusted
);
2530 return std::make_pair(AlignSpace
, AdjustedPwAffs
);
2534 class PPCGCodeGeneration
: public ScopPass
{
2538 GPURuntime Runtime
= GPURuntime::CUDA
;
2540 GPUArch Architecture
= GPUArch::NVPTX64
;
2542 /// The scop that is currently processed.
2547 ScalarEvolution
*SE
;
2548 const DataLayout
*DL
;
2551 PPCGCodeGeneration() : ScopPass(ID
) {}
2553 /// Construct compilation options for PPCG.
2555 /// @returns The compilation options.
2556 ppcg_options
*createPPCGOptions() {
2558 (ppcg_debug_options
*)malloc(sizeof(ppcg_debug_options
));
2559 auto Options
= (ppcg_options
*)malloc(sizeof(ppcg_options
));
2561 DebugOptions
->dump_schedule_constraints
= false;
2562 DebugOptions
->dump_schedule
= false;
2563 DebugOptions
->dump_final_schedule
= false;
2564 DebugOptions
->dump_sizes
= false;
2565 DebugOptions
->verbose
= false;
2567 Options
->debug
= DebugOptions
;
2569 Options
->group_chains
= false;
2570 Options
->reschedule
= true;
2571 Options
->scale_tile_loops
= false;
2572 Options
->wrap
= false;
2574 Options
->non_negative_parameters
= false;
2575 Options
->ctx
= nullptr;
2576 Options
->sizes
= nullptr;
2578 Options
->tile
= true;
2579 Options
->tile_size
= 32;
2581 Options
->isolate_full_tiles
= false;
2583 Options
->use_private_memory
= PrivateMemory
;
2584 Options
->use_shared_memory
= SharedMemory
;
2585 Options
->max_shared_memory
= 48 * 1024;
2587 Options
->target
= PPCG_TARGET_CUDA
;
2588 Options
->openmp
= false;
2589 Options
->linearize_device_arrays
= true;
2590 Options
->allow_gnu_extensions
= false;
2592 Options
->unroll_copy_shared
= false;
2593 Options
->unroll_gpu_tile
= false;
2594 Options
->live_range_reordering
= true;
2596 Options
->live_range_reordering
= true;
2597 Options
->hybrid
= false;
2598 Options
->opencl_compiler_options
= nullptr;
2599 Options
->opencl_use_gpu
= false;
2600 Options
->opencl_n_include_file
= 0;
2601 Options
->opencl_include_files
= nullptr;
2602 Options
->opencl_print_kernel_types
= false;
2603 Options
->opencl_embed_kernel_code
= false;
2605 Options
->save_schedule_file
= nullptr;
2606 Options
->load_schedule_file
= nullptr;
2611 /// Get a tagged access relation containing all accesses of type @p AccessTy.
2613 /// Instead of a normal access of the form:
2615 /// Stmt[i,j,k] -> Array[f_0(i,j,k), f_1(i,j,k)]
2617 /// a tagged access has the form
2619 /// [Stmt[i,j,k] -> id[]] -> Array[f_0(i,j,k), f_1(i,j,k)]
2621 /// where 'id' is an additional space that references the memory access that
2622 /// triggered the access.
2624 /// @param AccessTy The type of the memory accesses to collect.
2626 /// @return The relation describing all tagged memory accesses.
2627 isl_union_map
*getTaggedAccesses(enum MemoryAccess::AccessType AccessTy
) {
2628 isl_union_map
*Accesses
= isl_union_map_empty(S
->getParamSpace().release());
2630 for (auto &Stmt
: *S
)
2631 for (auto &Acc
: Stmt
)
2632 if (Acc
->getType() == AccessTy
) {
2633 isl_map
*Relation
= Acc
->getAccessRelation().release();
2635 isl_map_intersect_domain(Relation
, Stmt
.getDomain().release());
2637 isl_space
*Space
= isl_map_get_space(Relation
);
2638 Space
= isl_space_range(Space
);
2639 Space
= isl_space_from_range(Space
);
2641 isl_space_set_tuple_id(Space
, isl_dim_in
, Acc
->getId().release());
2642 isl_map
*Universe
= isl_map_universe(Space
);
2643 Relation
= isl_map_domain_product(Relation
, Universe
);
2644 Accesses
= isl_union_map_add_map(Accesses
, Relation
);
2650 /// Get the set of all read accesses, tagged with the access id.
2652 /// @see getTaggedAccesses
2653 isl_union_map
*getTaggedReads() {
2654 return getTaggedAccesses(MemoryAccess::READ
);
2657 /// Get the set of all may (and must) accesses, tagged with the access id.
2659 /// @see getTaggedAccesses
2660 isl_union_map
*getTaggedMayWrites() {
2661 return isl_union_map_union(getTaggedAccesses(MemoryAccess::MAY_WRITE
),
2662 getTaggedAccesses(MemoryAccess::MUST_WRITE
));
2665 /// Get the set of all must accesses, tagged with the access id.
2667 /// @see getTaggedAccesses
2668 isl_union_map
*getTaggedMustWrites() {
2669 return getTaggedAccesses(MemoryAccess::MUST_WRITE
);
2672 /// Collect parameter and array names as isl_ids.
2674 /// To reason about the different parameters and arrays used, ppcg requires
2675 /// a list of all isl_ids in use. As PPCG traditionally performs
2676 /// source-to-source compilation each of these isl_ids is mapped to the
2677 /// expression that represents it. As we do not have a corresponding
2678 /// expression in Polly, we just map each id to a 'zero' expression to match
2679 /// the data format that ppcg expects.
2681 /// @returns Retun a map from collected ids to 'zero' ast expressions.
2682 __isl_give isl_id_to_ast_expr
*getNames() {
2683 auto *Names
= isl_id_to_ast_expr_alloc(
2684 S
->getIslCtx().get(),
2685 S
->getNumParams() + std::distance(S
->array_begin(), S
->array_end()));
2686 auto *Zero
= isl_ast_expr_from_val(isl_val_zero(S
->getIslCtx().get()));
2688 for (const SCEV
*P
: S
->parameters()) {
2689 isl_id
*Id
= S
->getIdForParam(P
).release();
2690 Names
= isl_id_to_ast_expr_set(Names
, Id
, isl_ast_expr_copy(Zero
));
2693 for (auto &Array
: S
->arrays()) {
2694 auto Id
= Array
->getBasePtrId().release();
2695 Names
= isl_id_to_ast_expr_set(Names
, Id
, isl_ast_expr_copy(Zero
));
2698 isl_ast_expr_free(Zero
);
2703 /// Create a new PPCG scop from the current scop.
2705 /// The PPCG scop is initialized with data from the current polly::Scop. From
2706 /// this initial data, the data-dependences in the PPCG scop are initialized.
2707 /// We do not use Polly's dependence analysis for now, to ensure we match
2708 /// the PPCG default behaviour more closely.
2710 /// @returns A new ppcg scop.
2711 ppcg_scop
*createPPCGScop() {
2712 MustKillsInfo KillsInfo
= computeMustKillsInfo(*S
);
2714 auto PPCGScop
= (ppcg_scop
*)malloc(sizeof(ppcg_scop
));
2716 PPCGScop
->options
= createPPCGOptions();
2717 // enable live range reordering
2718 PPCGScop
->options
->live_range_reordering
= 1;
2720 PPCGScop
->start
= 0;
2723 PPCGScop
->context
= S
->getContext().release();
2724 PPCGScop
->domain
= S
->getDomains().release();
2725 // TODO: investigate this further. PPCG calls collect_call_domains.
2726 PPCGScop
->call
= isl_union_set_from_set(S
->getContext().release());
2727 PPCGScop
->tagged_reads
= getTaggedReads();
2728 PPCGScop
->reads
= S
->getReads().release();
2729 PPCGScop
->live_in
= nullptr;
2730 PPCGScop
->tagged_may_writes
= getTaggedMayWrites();
2731 PPCGScop
->may_writes
= S
->getWrites().release();
2732 PPCGScop
->tagged_must_writes
= getTaggedMustWrites();
2733 PPCGScop
->must_writes
= S
->getMustWrites().release();
2734 PPCGScop
->live_out
= nullptr;
2735 PPCGScop
->tagged_must_kills
= KillsInfo
.TaggedMustKills
.release();
2736 PPCGScop
->must_kills
= KillsInfo
.MustKills
.release();
2738 PPCGScop
->tagger
= nullptr;
2739 PPCGScop
->independence
=
2740 isl_union_map_empty(isl_set_get_space(PPCGScop
->context
));
2741 PPCGScop
->dep_flow
= nullptr;
2742 PPCGScop
->tagged_dep_flow
= nullptr;
2743 PPCGScop
->dep_false
= nullptr;
2744 PPCGScop
->dep_forced
= nullptr;
2745 PPCGScop
->dep_order
= nullptr;
2746 PPCGScop
->tagged_dep_order
= nullptr;
2748 PPCGScop
->schedule
= S
->getScheduleTree().release();
2749 // If we have something non-trivial to kill, add it to the schedule
2750 if (KillsInfo
.KillsSchedule
.get())
2751 PPCGScop
->schedule
= isl_schedule_sequence(
2752 PPCGScop
->schedule
, KillsInfo
.KillsSchedule
.release());
2754 PPCGScop
->names
= getNames();
2755 PPCGScop
->pet
= nullptr;
2757 compute_tagger(PPCGScop
);
2758 compute_dependences(PPCGScop
);
2759 eliminate_dead_code(PPCGScop
);
2764 /// Collect the array accesses in a statement.
2766 /// @param Stmt The statement for which to collect the accesses.
2768 /// @returns A list of array accesses.
2769 gpu_stmt_access
*getStmtAccesses(ScopStmt
&Stmt
) {
2770 gpu_stmt_access
*Accesses
= nullptr;
2772 for (MemoryAccess
*Acc
: Stmt
) {
2774 isl_alloc_type(S
->getIslCtx().get(), struct gpu_stmt_access
);
2775 Access
->read
= Acc
->isRead();
2776 Access
->write
= Acc
->isWrite();
2777 Access
->access
= Acc
->getAccessRelation().release();
2778 isl_space
*Space
= isl_map_get_space(Access
->access
);
2779 Space
= isl_space_range(Space
);
2780 Space
= isl_space_from_range(Space
);
2781 Space
= isl_space_set_tuple_id(Space
, isl_dim_in
, Acc
->getId().release());
2782 isl_map
*Universe
= isl_map_universe(Space
);
2783 Access
->tagged_access
=
2784 isl_map_domain_product(Acc
->getAccessRelation().release(), Universe
);
2785 Access
->exact_write
= !Acc
->isMayWrite();
2786 Access
->ref_id
= Acc
->getId().release();
2787 Access
->next
= Accesses
;
2788 Access
->n_index
= Acc
->getScopArrayInfo()->getNumberOfDimensions();
2789 // TODO: Also mark one-element accesses to arrays as fixed-element.
2790 Access
->fixed_element
=
2791 Acc
->isLatestScalarKind() ? isl_bool_true
: isl_bool_false
;
2798 /// Collect the list of GPU statements.
2800 /// Each statement has an id, a pointer to the underlying data structure,
2801 /// as well as a list with all memory accesses.
2803 /// TODO: Initialize the list of memory accesses.
2805 /// @returns A linked-list of statements.
2806 gpu_stmt
*getStatements() {
2807 gpu_stmt
*Stmts
= isl_calloc_array(S
->getIslCtx().get(), struct gpu_stmt
,
2808 std::distance(S
->begin(), S
->end()));
2811 for (auto &Stmt
: *S
) {
2812 gpu_stmt
*GPUStmt
= &Stmts
[i
];
2814 GPUStmt
->id
= Stmt
.getDomainId().release();
2816 // We use the pet stmt pointer to keep track of the Polly statements.
2817 GPUStmt
->stmt
= (pet_stmt
*)&Stmt
;
2818 GPUStmt
->accesses
= getStmtAccesses(Stmt
);
2825 /// Derive the extent of an array.
2827 /// The extent of an array is the set of elements that are within the
2828 /// accessed array. For the inner dimensions, the extent constraints are
2829 /// 0 and the size of the corresponding array dimension. For the first
2830 /// (outermost) dimension, the extent constraints are the minimal and maximal
2831 /// subscript value for the first dimension.
2833 /// @param Array The array to derive the extent for.
2835 /// @returns An isl_set describing the extent of the array.
2836 isl::set
getExtent(ScopArrayInfo
*Array
) {
2837 unsigned NumDims
= Array
->getNumberOfDimensions();
2839 if (Array
->getNumberOfDimensions() == 0)
2840 return isl::set::universe(Array
->getSpace());
2842 isl::union_map Accesses
= S
->getAccesses(Array
);
2843 isl::union_set AccessUSet
= Accesses
.range();
2844 AccessUSet
= AccessUSet
.coalesce();
2845 AccessUSet
= AccessUSet
.detect_equalities();
2846 AccessUSet
= AccessUSet
.coalesce();
2848 if (AccessUSet
.is_empty())
2849 return isl::set::empty(Array
->getSpace());
2851 isl::set AccessSet
= AccessUSet
.extract_set(Array
->getSpace());
2853 isl::local_space LS
= isl::local_space(Array
->getSpace());
2855 isl::pw_aff Val
= isl::aff::var_on_domain(LS
, isl::dim::set
, 0);
2856 isl::pw_aff OuterMin
= AccessSet
.dim_min(0);
2857 isl::pw_aff OuterMax
= AccessSet
.dim_max(0);
2858 OuterMin
= OuterMin
.add_dims(isl::dim::in
, Val
.dim(isl::dim::in
));
2859 OuterMax
= OuterMax
.add_dims(isl::dim::in
, Val
.dim(isl::dim::in
));
2860 OuterMin
= OuterMin
.set_tuple_id(isl::dim::in
, Array
->getBasePtrId());
2861 OuterMax
= OuterMax
.set_tuple_id(isl::dim::in
, Array
->getBasePtrId());
2863 isl::set Extent
= isl::set::universe(Array
->getSpace());
2865 Extent
= Extent
.intersect(OuterMin
.le_set(Val
));
2866 Extent
= Extent
.intersect(OuterMax
.ge_set(Val
));
2868 for (unsigned i
= 1; i
< NumDims
; ++i
)
2869 Extent
= Extent
.lower_bound_si(isl::dim::set
, i
, 0);
2871 for (unsigned i
= 0; i
< NumDims
; ++i
) {
2872 isl::pw_aff PwAff
= Array
->getDimensionSizePw(i
);
2874 // isl_pw_aff can be NULL for zero dimension. Only in the case of a
2875 // Fortran array will we have a legitimate dimension.
2876 if (PwAff
.is_null()) {
2877 assert(i
== 0 && "invalid dimension isl_pw_aff for nonzero dimension");
2881 isl::pw_aff Val
= isl::aff::var_on_domain(
2882 isl::local_space(Array
->getSpace()), isl::dim::set
, i
);
2883 PwAff
= PwAff
.add_dims(isl::dim::in
, Val
.dim(isl::dim::in
));
2884 PwAff
= PwAff
.set_tuple_id(isl::dim::in
, Val
.get_tuple_id(isl::dim::in
));
2885 isl::set Set
= PwAff
.gt_set(Val
);
2886 Extent
= Set
.intersect(Extent
);
2892 /// Derive the bounds of an array.
2894 /// For the first dimension we derive the bound of the array from the extent
2895 /// of this dimension. For inner dimensions we obtain their size directly from
2898 /// @param PPCGArray The array to compute bounds for.
2899 /// @param Array The polly array from which to take the information.
2900 void setArrayBounds(gpu_array_info
&PPCGArray
, ScopArrayInfo
*Array
) {
2901 std::vector
<isl_pw_aff
*> Bounds
;
2903 if (PPCGArray
.n_index
> 0) {
2904 if (isl_set_is_empty(PPCGArray
.extent
)) {
2905 isl_set
*Dom
= isl_set_copy(PPCGArray
.extent
);
2906 isl_local_space
*LS
= isl_local_space_from_space(
2907 isl_space_params(isl_set_get_space(Dom
)));
2909 isl_pw_aff
*Zero
= isl_pw_aff_from_aff(isl_aff_zero_on_domain(LS
));
2910 Bounds
.push_back(Zero
);
2912 isl_set
*Dom
= isl_set_copy(PPCGArray
.extent
);
2913 Dom
= isl_set_project_out(Dom
, isl_dim_set
, 1, PPCGArray
.n_index
- 1);
2914 isl_pw_aff
*Bound
= isl_set_dim_max(isl_set_copy(Dom
), 0);
2916 Dom
= isl_pw_aff_domain(isl_pw_aff_copy(Bound
));
2917 isl_local_space
*LS
=
2918 isl_local_space_from_space(isl_set_get_space(Dom
));
2919 isl_aff
*One
= isl_aff_zero_on_domain(LS
);
2920 One
= isl_aff_add_constant_si(One
, 1);
2921 Bound
= isl_pw_aff_add(Bound
, isl_pw_aff_alloc(Dom
, One
));
2922 Bound
= isl_pw_aff_gist(Bound
, S
->getContext().release());
2923 Bounds
.push_back(Bound
);
2927 for (unsigned i
= 1; i
< PPCGArray
.n_index
; ++i
) {
2928 isl_pw_aff
*Bound
= Array
->getDimensionSizePw(i
).release();
2929 auto LS
= isl_pw_aff_get_domain_space(Bound
);
2930 auto Aff
= isl_multi_aff_zero(LS
);
2932 // We need types to work out, which is why we perform this weird dance
2933 // with `Aff` and `Bound`. Consider this example:
2935 // LS: [p] -> { [] }
2936 // Zero: [p] -> { [] } | Implicitly, is [p] -> { ~ -> [] }.
2937 // This `~` is used to denote a "null space" (which is different from
2938 // a *zero dimensional* space), which is something that ISL does not
2939 // show you when pretty printing.
2941 // Bound: [p] -> { [] -> [(10p)] } | Here, the [] is a *zero dimensional*
2942 // space, not a "null space" which does not exist at all.
2944 // When we pullback (precompose) `Bound` with `Zero`, we get:
2946 // ([p] -> { [] -> [(10p)] }) . ([p] -> {~ -> [] }) =
2947 // [p] -> { ~ -> [(10p)] } =
2948 // [p] -> [(10p)] (as ISL pretty prints it)
2949 // Bound Pullback: [p] -> { [(10p)] }
2951 // We want this kind of an expression for Bound, without a
2952 // zero dimensional input, but with a "null space" input for the types
2953 // to work out later on, as far as I (Siddharth Bhat) understand.
2954 // I was unable to find a reference to this in the ISL manual.
2955 // References: Tobias Grosser.
2957 Bound
= isl_pw_aff_pullback_multi_aff(Bound
, Aff
);
2958 Bounds
.push_back(Bound
);
2961 /// To construct a `isl_multi_pw_aff`, we need all the indivisual `pw_aff`
2962 /// to have the same parameter dimensions. So, we need to align them to an
2963 /// appropriate space.
2964 /// Scop::Context is _not_ an appropriate space, because when we have
2965 /// `-polly-ignore-parameter-bounds` enabled, the Scop::Context does not
2966 /// contain all parameter dimensions.
2967 /// So, use the helper `alignPwAffs` to align all the `isl_pw_aff` together.
2968 isl_space
*SeedAlignSpace
= S
->getParamSpace().release();
2969 SeedAlignSpace
= isl_space_add_dims(SeedAlignSpace
, isl_dim_set
, 1);
2971 isl_space
*AlignSpace
= nullptr;
2972 std::vector
<isl_pw_aff
*> AlignedBounds
;
2973 std::tie(AlignSpace
, AlignedBounds
) =
2974 alignPwAffs(std::move(Bounds
), SeedAlignSpace
);
2976 assert(AlignSpace
&& "alignPwAffs did not initialise AlignSpace");
2978 isl_pw_aff_list
*BoundsList
=
2979 createPwAffList(S
->getIslCtx().get(), std::move(AlignedBounds
));
2981 isl_space
*BoundsSpace
= isl_set_get_space(PPCGArray
.extent
);
2982 BoundsSpace
= isl_space_align_params(BoundsSpace
, AlignSpace
);
2984 assert(BoundsSpace
&& "Unable to access space of array.");
2985 assert(BoundsList
&& "Unable to access list of bounds.");
2988 isl_multi_pw_aff_from_pw_aff_list(BoundsSpace
, BoundsList
);
2989 assert(PPCGArray
.bound
&& "PPCGArray.bound was not constructed correctly.");
2992 /// Create the arrays for @p PPCGProg.
2994 /// @param PPCGProg The program to compute the arrays for.
2995 void createArrays(gpu_prog
*PPCGProg
,
2996 const SmallVector
<ScopArrayInfo
*, 4> &ValidSAIs
) {
2998 for (auto &Array
: ValidSAIs
) {
2999 std::string TypeName
;
3000 raw_string_ostream
OS(TypeName
);
3002 OS
<< *Array
->getElementType();
3003 TypeName
= OS
.str();
3005 gpu_array_info
&PPCGArray
= PPCGProg
->array
[i
];
3007 PPCGArray
.space
= Array
->getSpace().release();
3008 PPCGArray
.type
= strdup(TypeName
.c_str());
3009 PPCGArray
.size
= DL
->getTypeAllocSize(Array
->getElementType());
3010 PPCGArray
.name
= strdup(Array
->getName().c_str());
3011 PPCGArray
.extent
= nullptr;
3012 PPCGArray
.n_index
= Array
->getNumberOfDimensions();
3013 PPCGArray
.extent
= getExtent(Array
).release();
3014 PPCGArray
.n_ref
= 0;
3015 PPCGArray
.refs
= nullptr;
3016 PPCGArray
.accessed
= true;
3017 PPCGArray
.read_only_scalar
=
3018 Array
->isReadOnly() && Array
->getNumberOfDimensions() == 0;
3019 PPCGArray
.has_compound_element
= false;
3020 PPCGArray
.local
= false;
3021 PPCGArray
.declare_local
= false;
3022 PPCGArray
.global
= false;
3023 PPCGArray
.linearize
= false;
3024 PPCGArray
.dep_order
= nullptr;
3025 PPCGArray
.user
= Array
;
3027 PPCGArray
.bound
= nullptr;
3028 setArrayBounds(PPCGArray
, Array
);
3031 collect_references(PPCGProg
, &PPCGArray
);
3032 PPCGArray
.only_fixed_element
= only_fixed_element_accessed(&PPCGArray
);
3036 /// Create an identity map between the arrays in the scop.
3038 /// @returns An identity map between the arrays in the scop.
3039 isl_union_map
*getArrayIdentity() {
3040 isl_union_map
*Maps
= isl_union_map_empty(S
->getParamSpace().release());
3042 for (auto &Array
: S
->arrays()) {
3043 isl_space
*Space
= Array
->getSpace().release();
3044 Space
= isl_space_map_from_set(Space
);
3045 isl_map
*Identity
= isl_map_identity(Space
);
3046 Maps
= isl_union_map_add_map(Maps
, Identity
);
3052 /// Create a default-initialized PPCG GPU program.
3054 /// @returns A new gpu program description.
3055 gpu_prog
*createPPCGProg(ppcg_scop
*PPCGScop
) {
3060 auto PPCGProg
= isl_calloc_type(S
->getIslCtx().get(), struct gpu_prog
);
3062 PPCGProg
->ctx
= S
->getIslCtx().get();
3063 PPCGProg
->scop
= PPCGScop
;
3064 PPCGProg
->context
= isl_set_copy(PPCGScop
->context
);
3065 PPCGProg
->read
= isl_union_map_copy(PPCGScop
->reads
);
3066 PPCGProg
->may_write
= isl_union_map_copy(PPCGScop
->may_writes
);
3067 PPCGProg
->must_write
= isl_union_map_copy(PPCGScop
->must_writes
);
3068 PPCGProg
->tagged_must_kill
=
3069 isl_union_map_copy(PPCGScop
->tagged_must_kills
);
3070 PPCGProg
->to_inner
= getArrayIdentity();
3071 PPCGProg
->to_outer
= getArrayIdentity();
3072 // TODO: verify that this assignment is correct.
3073 PPCGProg
->any_to_outer
= nullptr;
3074 PPCGProg
->n_stmts
= std::distance(S
->begin(), S
->end());
3075 PPCGProg
->stmts
= getStatements();
3077 // Only consider arrays that have a non-empty extent.
3078 // Otherwise, this will cause us to consider the following kinds of
3080 // 1. Invariant loads that are represented by SAI objects.
3081 // 2. Arrays with statically known zero size.
3082 auto ValidSAIsRange
=
3083 make_filter_range(S
->arrays(), [this](ScopArrayInfo
*SAI
) -> bool {
3084 return !getExtent(SAI
).is_empty();
3086 SmallVector
<ScopArrayInfo
*, 4> ValidSAIs(ValidSAIsRange
.begin(),
3087 ValidSAIsRange
.end());
3090 ValidSAIs
.size(); // std::distance(S->array_begin(), S->array_end());
3091 PPCGProg
->array
= isl_calloc_array(
3092 S
->getIslCtx().get(), struct gpu_array_info
, PPCGProg
->n_array
);
3094 createArrays(PPCGProg
, ValidSAIs
);
3096 PPCGProg
->array_order
= nullptr;
3097 collect_order_dependences(PPCGProg
);
3099 PPCGProg
->may_persist
= compute_may_persist(PPCGProg
);
3103 struct PrintGPUUserData
{
3104 struct cuda_info
*CudaInfo
;
3105 struct gpu_prog
*PPCGProg
;
3106 std::vector
<ppcg_kernel
*> Kernels
;
3109 /// Print a user statement node in the host code.
3111 /// We use ppcg's printing facilities to print the actual statement and
3112 /// additionally build up a list of all kernels that are encountered in the
3115 /// @param P The printer to print to
3116 /// @param Options The printing options to use
3117 /// @param Node The node to print
3118 /// @param User A user pointer to carry additional data. This pointer is
3119 /// expected to be of type PrintGPUUserData.
3121 /// @returns A printer to which the output has been printed.
3122 static __isl_give isl_printer
*
3123 printHostUser(__isl_take isl_printer
*P
,
3124 __isl_take isl_ast_print_options
*Options
,
3125 __isl_take isl_ast_node
*Node
, void *User
) {
3126 auto Data
= (struct PrintGPUUserData
*)User
;
3127 auto Id
= isl_ast_node_get_annotation(Node
);
3130 bool IsUser
= !strcmp(isl_id_get_name(Id
), "user");
3132 // If this is a user statement, format it ourselves as ppcg would
3133 // otherwise try to call pet functionality that is not available in
3136 P
= isl_printer_start_line(P
);
3137 P
= isl_printer_print_ast_node(P
, Node
);
3138 P
= isl_printer_end_line(P
);
3140 isl_ast_print_options_free(Options
);
3144 auto Kernel
= (struct ppcg_kernel
*)isl_id_get_user(Id
);
3146 Data
->Kernels
.push_back(Kernel
);
3149 return print_host_user(P
, Options
, Node
, User
);
3152 /// Print C code corresponding to the control flow in @p Kernel.
3154 /// @param Kernel The kernel to print
3155 void printKernel(ppcg_kernel
*Kernel
) {
3156 auto *P
= isl_printer_to_str(S
->getIslCtx().get());
3157 P
= isl_printer_set_output_format(P
, ISL_FORMAT_C
);
3158 auto *Options
= isl_ast_print_options_alloc(S
->getIslCtx().get());
3159 P
= isl_ast_node_print(Kernel
->tree
, P
, Options
);
3160 char *String
= isl_printer_get_str(P
);
3161 outs() << String
<< "\n";
3163 isl_printer_free(P
);
3166 /// Print C code corresponding to the GPU code described by @p Tree.
3168 /// @param Tree An AST describing GPU code
3169 /// @param PPCGProg The PPCG program from which @Tree has been constructed.
3170 void printGPUTree(isl_ast_node
*Tree
, gpu_prog
*PPCGProg
) {
3171 auto *P
= isl_printer_to_str(S
->getIslCtx().get());
3172 P
= isl_printer_set_output_format(P
, ISL_FORMAT_C
);
3174 PrintGPUUserData Data
;
3175 Data
.PPCGProg
= PPCGProg
;
3177 auto *Options
= isl_ast_print_options_alloc(S
->getIslCtx().get());
3179 isl_ast_print_options_set_print_user(Options
, printHostUser
, &Data
);
3180 P
= isl_ast_node_print(Tree
, P
, Options
);
3181 char *String
= isl_printer_get_str(P
);
3182 outs() << "# host\n";
3183 outs() << String
<< "\n";
3185 isl_printer_free(P
);
3187 for (auto Kernel
: Data
.Kernels
) {
3188 outs() << "# kernel" << Kernel
->id
<< "\n";
3189 printKernel(Kernel
);
3193 // Generate a GPU program using PPCG.
3195 // GPU mapping consists of multiple steps:
3197 // 1) Compute new schedule for the program.
3198 // 2) Map schedule to GPU (TODO)
3199 // 3) Generate code for new schedule (TODO)
3201 // We do not use here the Polly ScheduleOptimizer, as the schedule optimizer
3202 // is mostly CPU specific. Instead, we use PPCG's GPU code generation
3203 // strategy directly from this pass.
3204 gpu_gen
*generateGPU(ppcg_scop
*PPCGScop
, gpu_prog
*PPCGProg
) {
3206 auto PPCGGen
= isl_calloc_type(S
->getIslCtx().get(), struct gpu_gen
);
3208 PPCGGen
->ctx
= S
->getIslCtx().get();
3209 PPCGGen
->options
= PPCGScop
->options
;
3210 PPCGGen
->print
= nullptr;
3211 PPCGGen
->print_user
= nullptr;
3212 PPCGGen
->build_ast_expr
= &pollyBuildAstExprForStmt
;
3213 PPCGGen
->prog
= PPCGProg
;
3214 PPCGGen
->tree
= nullptr;
3215 PPCGGen
->types
.n
= 0;
3216 PPCGGen
->types
.name
= nullptr;
3217 PPCGGen
->sizes
= nullptr;
3218 PPCGGen
->used_sizes
= nullptr;
3219 PPCGGen
->kernel_id
= 0;
3221 // Set scheduling strategy to same strategy PPCG is using.
3222 isl_options_set_schedule_outer_coincidence(PPCGGen
->ctx
, true);
3223 isl_options_set_schedule_maximize_band_depth(PPCGGen
->ctx
, true);
3224 isl_options_set_schedule_whole_component(PPCGGen
->ctx
, false);
3226 isl_schedule
*Schedule
= get_schedule(PPCGGen
);
3228 int has_permutable
= has_any_permutable_node(Schedule
);
3231 isl_schedule_align_params(Schedule
, S
->getFullParamSpace().release());
3233 if (!has_permutable
|| has_permutable
< 0) {
3234 Schedule
= isl_schedule_free(Schedule
);
3235 LLVM_DEBUG(dbgs() << getUniqueScopName(S
)
3236 << " does not have permutable bands. Bailing out\n";);
3238 const bool CreateTransferToFromDevice
= !PollyManagedMemory
;
3239 Schedule
= map_to_device(PPCGGen
, Schedule
, CreateTransferToFromDevice
);
3240 PPCGGen
->tree
= generate_code(PPCGGen
, isl_schedule_copy(Schedule
));
3244 isl_printer
*P
= isl_printer_to_str(S
->getIslCtx().get());
3245 P
= isl_printer_set_yaml_style(P
, ISL_YAML_STYLE_BLOCK
);
3246 P
= isl_printer_print_str(P
, "Schedule\n");
3247 P
= isl_printer_print_str(P
, "========\n");
3249 P
= isl_printer_print_schedule(P
, Schedule
);
3251 P
= isl_printer_print_str(P
, "No schedule found\n");
3253 outs() << isl_printer_get_str(P
) << "\n";
3254 isl_printer_free(P
);
3261 printGPUTree(PPCGGen
->tree
, PPCGProg
);
3263 outs() << "No code generated\n";
3266 isl_schedule_free(Schedule
);
  /// Free gpu_gen structure.
  ///
  /// @param PPCGGen The ppcg_gen object to free.
  void freePPCGGen(gpu_gen *PPCGGen) {
    isl_ast_node_free(PPCGGen->tree);
    isl_union_map_free(PPCGGen->sizes);
    isl_union_map_free(PPCGGen->used_sizes);
    free(PPCGGen);
  }

  /// Free the options in the ppcg scop structure.
  ///
  /// ppcg is not freeing these options for us. To avoid leaks we do this
  /// ourselves.
  ///
  /// @param PPCGScop The scop referencing the options to free.
  void freeOptions(ppcg_scop *PPCGScop) {
    free(PPCGScop->options->debug);
    PPCGScop->options->debug = nullptr;
    free(PPCGScop->options);
    PPCGScop->options = nullptr;
  }
3294 /// Approximate the number of points in the set.
3296 /// This function returns an ast expression that overapproximates the number
3297 /// of points in an isl set through the rectangular hull surrounding this set.
3299 /// @param Set The set to count.
3300 /// @param Build The isl ast build object to use for creating the ast
3303 /// @returns An approximation of the number of points in the set.
3304 __isl_give isl_ast_expr
*approxPointsInSet(__isl_take isl_set
*Set
,
3305 __isl_keep isl_ast_build
*Build
) {
3307 isl_val
*One
= isl_val_int_from_si(isl_set_get_ctx(Set
), 1);
3308 auto *Expr
= isl_ast_expr_from_val(isl_val_copy(One
));
3310 isl_space
*Space
= isl_set_get_space(Set
);
3311 Space
= isl_space_params(Space
);
3312 auto *Univ
= isl_set_universe(Space
);
3313 isl_pw_aff
*OneAff
= isl_pw_aff_val_on_domain(Univ
, One
);
3315 for (long i
= 0, n
= isl_set_dim(Set
, isl_dim_set
); i
< n
; i
++) {
3316 isl_pw_aff
*Max
= isl_set_dim_max(isl_set_copy(Set
), i
);
3317 isl_pw_aff
*Min
= isl_set_dim_min(isl_set_copy(Set
), i
);
3318 isl_pw_aff
*DimSize
= isl_pw_aff_sub(Max
, Min
);
3319 DimSize
= isl_pw_aff_add(DimSize
, isl_pw_aff_copy(OneAff
));
3320 auto DimSizeExpr
= isl_ast_build_expr_from_pw_aff(Build
, DimSize
);
3321 Expr
= isl_ast_expr_mul(Expr
, DimSizeExpr
);
3325 isl_pw_aff_free(OneAff
);
  /// Approximate a number of dynamic instructions executed by a given
  /// statement.
  ///
  /// @param Stmt  The statement for which to compute the number of dynamic
  ///              instructions.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  /// @returns An approximation of the number of dynamic instructions executed
  ///          by @p Stmt.
  __isl_give isl_ast_expr *approxDynamicInst(ScopStmt &Stmt,
                                             __isl_keep isl_ast_build *Build) {
    auto Iterations = approxPointsInSet(Stmt.getDomain().release(), Build);

    long InstCount = 0;

    if (Stmt.isBlockStmt()) {
      auto *BB = Stmt.getBasicBlock();
      InstCount = std::distance(BB->begin(), BB->end());
    } else {
      auto *R = Stmt.getRegion();

      for (auto *BB : R->blocks()) {
        InstCount += std::distance(BB->begin(), BB->end());
      }
    }

    isl_val *InstVal = isl_val_int_from_si(S->getIslCtx().get(), InstCount);
    auto *InstExpr = isl_ast_expr_from_val(InstVal);
    return isl_ast_expr_mul(InstExpr, Iterations);
  }
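  // Example: a block statement containing 8 LLVM-IR instructions with the
  // domain { S[i, j] : 0 <= i < N and 0 <= j < M } is approximated as
  // 8 * N * M dynamic instructions.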
  /// Approximate dynamic instructions executed in scop.
  ///
  /// @param S     The scop for which to approximate dynamic instructions.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  /// @returns An approximation of the number of dynamic instructions executed
  ///          in @p S.
  __isl_give isl_ast_expr *
  getNumberOfIterations(Scop &S, __isl_keep isl_ast_build *Build) {
    isl_ast_expr *Instructions;

    isl_val *Zero = isl_val_int_from_si(S.getIslCtx().get(), 0);
    Instructions = isl_ast_expr_from_val(Zero);

    for (ScopStmt &Stmt : S) {
      isl_ast_expr *StmtInstructions = approxDynamicInst(Stmt, Build);
      Instructions = isl_ast_expr_add(Instructions, StmtInstructions);
    }
    return Instructions;
  }
  /// Create a check that ensures sufficient compute in scop.
  ///
  /// @param S     The scop for which to ensure sufficient compute.
  /// @param Build The isl ast build object to use for creating the ast
  ///              expression.
  /// @returns An expression that evaluates to TRUE in case of sufficient
  ///          compute and to FALSE, otherwise.
  __isl_give isl_ast_expr *
  createSufficientComputeCheck(Scop &S, __isl_keep isl_ast_build *Build) {
    auto Iterations = getNumberOfIterations(S, Build);
    auto *MinComputeVal = isl_val_int_from_si(S.getIslCtx().get(), MinCompute);
    auto *MinComputeExpr = isl_ast_expr_from_val(MinComputeVal);
    return isl_ast_expr_ge(Iterations, MinComputeExpr);
  }
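  // Conceptually, the run-time check built here reads
  //   (approximate dynamic instructions in the scop) >= MinCompute
  // so that scops with too little work are not offloaded to the GPU.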
  /// Check if the basic block contains a function we cannot codegen for GPU
  /// kernels.
  ///
  /// If this basic block does something with a `Function` other than calling
  /// a function that we support in a kernel, return true.
  bool containsInvalidKernelFunctionInBlock(const BasicBlock *BB,
                                            bool AllowCUDALibDevice) {
    for (const Instruction &Inst : *BB) {
      const CallInst *Call = dyn_cast<CallInst>(&Inst);
      if (Call && isValidFunctionInKernel(Call->getCalledFunction(),
                                          AllowCUDALibDevice))
        continue;

      for (Value *Op : Inst.operands())
        // Look for (<func-type>*) among operands of Inst
        if (auto PtrTy = dyn_cast<PointerType>(Op->getType())) {
          if (isa<FunctionType>(PtrTy->getElementType())) {
            LLVM_DEBUG(dbgs()
                       << Inst << " has illegal use of function in kernel.\n");
            return true;
          }
        }
    }
    return false;
  }
  /// Return whether the Scop S uses functions in a way that we do not support.
  bool containsInvalidKernelFunction(const Scop &S, bool AllowCUDALibDevice) {
    for (auto &Stmt : S) {
      if (Stmt.isBlockStmt()) {
        if (containsInvalidKernelFunctionInBlock(Stmt.getBasicBlock(),
                                                 AllowCUDALibDevice))
          return true;
      } else {
        assert(Stmt.isRegionStmt() &&
               "Stmt was neither block nor region statement");
        for (const BasicBlock *BB : Stmt.getRegion()->blocks())
          if (containsInvalidKernelFunctionInBlock(BB, AllowCUDALibDevice))
            return true;
      }
    }

    return false;
  }
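  // This rejects, for example, statements that take the address of a host
  // function or call through a function pointer, since such function values
  // cannot be materialised inside the generated GPU kernel.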
  /// Generate code for a given GPU AST described by @p Root.
  ///
  /// @param Root An isl_ast_node pointing to the root of the GPU AST.
  /// @param Prog The GPU Program to generate code for.
  void generateCode(__isl_take isl_ast_node *Root, gpu_prog *Prog) {
    ScopAnnotator Annotator;
    Annotator.buildAliasScopes(*S);

    Region *R = &S->getRegion();

    simplifyRegion(R, DT, LI, RI);

    BasicBlock *EnteringBB = R->getEnteringBlock();

    PollyIRBuilder Builder(EnteringBB->getContext(), ConstantFolder(),
                           IRInserter(Annotator));
    Builder.SetInsertPoint(EnteringBB->getTerminator());

    // Only build the run-time condition and parameters _after_ having
    // introduced the conditional branch. This is important as the conditional
    // branch will guard the original scop from new induction variables that
    // the SCEVExpander may introduce while code generating the parameters and
    // which may introduce scalar dependences that prevent us from correctly
    // code generating this scop.
    BBPair StartExitBlocks;
    BranchInst *CondBr = nullptr;
    std::tie(StartExitBlocks, CondBr) =
        executeScopConditionally(*S, Builder.getTrue(), *DT, *RI, *LI);
    BasicBlock *StartBlock = std::get<0>(StartExitBlocks);

    assert(CondBr && "CondBr not initialized by executeScopConditionally");

    GPUNodeBuilder NodeBuilder(Builder, Annotator, *DL, *LI, *SE, *DT, *S,
                               StartBlock, Prog, Runtime, Architecture);

    // TODO: Handle LICM
    auto SplitBlock = StartBlock->getSinglePredecessor();
    Builder.SetInsertPoint(SplitBlock->getTerminator());

    isl_ast_build *Build = isl_ast_build_alloc(S->getIslCtx().get());
    isl_ast_expr *Condition = IslAst::buildRunCondition(*S, Build);
    isl_ast_expr *SufficientCompute = createSufficientComputeCheck(*S, Build);
    Condition = isl_ast_expr_and(Condition, SufficientCompute);
    isl_ast_build_free(Build);
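    // At this point Condition combines the correctness run-time check from
    // IslAst with the sufficient-compute profitability check; it is turned
    // into an LLVM-IR value further below via NodeBuilder.createRTC.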
    // preload invariant loads. Note: This should happen before the RTC
    // because the RTC may depend on values that are invariant load hoisted.
    if (!NodeBuilder.preloadInvariantLoads()) {
      // Patch the introduced branch condition to ensure that we always execute
      // the original SCoP.
      auto *FalseI1 = Builder.getFalse();
      auto *SplitBBTerm = Builder.GetInsertBlock()->getTerminator();
      SplitBBTerm->setOperand(0, FalseI1);

      LLVM_DEBUG(dbgs() << "preloading invariant loads failed in function: " +
                               S->getFunction().getName() +
                               " | Scop Region: " + S->getNameStr());
      // adjust the dominator tree accordingly.
      auto *ExitingBlock = StartBlock->getUniqueSuccessor();
      assert(ExitingBlock);
      auto *MergeBlock = ExitingBlock->getUniqueSuccessor();
      assert(MergeBlock);
      polly::markBlockUnreachable(*StartBlock, Builder);
      polly::markBlockUnreachable(*ExitingBlock, Builder);
      auto *ExitingBB = S->getExitingBlock();
      assert(ExitingBB);

      DT->changeImmediateDominator(MergeBlock, ExitingBB);
      DT->eraseNode(ExitingBlock);
      isl_ast_expr_free(Condition);
      isl_ast_node_free(Root);
    } else {

      if (polly::PerfMonitoring) {
        PerfMonitor P(*S, EnteringBB->getParent()->getParent());
        P.initialize();
        P.insertRegionStart(SplitBlock->getTerminator());

        // TODO: actually think if this is the correct exiting block to place
        // the `end` performance marker. Invariant load hoisting changes
        // the CFG in a way that I do not precisely understand, so I
        // (Siddharth<siddu.druid@gmail.com>) should come back to this and
        // think about which exiting block to use.
        auto *ExitingBlock = StartBlock->getUniqueSuccessor();
        assert(ExitingBlock);
        BasicBlock *MergeBlock = ExitingBlock->getUniqueSuccessor();
        P.insertRegionEnd(MergeBlock->getTerminator());
      }

      NodeBuilder.addParameters(S->getContext().release());
      Value *RTC = NodeBuilder.createRTC(Condition);
      Builder.GetInsertBlock()->getTerminator()->setOperand(0, RTC);

      Builder.SetInsertPoint(&*StartBlock->begin());

      NodeBuilder.create(Root);
    }

    /// In case a sequential kernel has more surrounding loops than any
    /// parallel kernel, the SCoP is probably mostly sequential. Hence, there
    /// is no point in running it on a GPU.
    if (NodeBuilder.DeepestSequential > NodeBuilder.DeepestParallel)
      CondBr->setOperand(0, Builder.getFalse());

    if (!NodeBuilder.BuildSuccessful)
      CondBr->setOperand(0, Builder.getFalse());
  }
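  /// Generate GPU code for @p CurrentScop.
  ///
  /// The overall flow below is: wire up the required analyses, bail out if
  /// the scop uses functions that cannot be called from a kernel, build the
  /// ppcg scop and program descriptions, compute the GPU schedule and AST,
  /// generate host and kernel IR from that AST, and finally release the ppcg
  /// data structures.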
  bool runOnScop(Scop &CurrentScop) override {
    S = &CurrentScop;
    LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
    DL = &S->getRegion().getEntry()->getModule()->getDataLayout();
    RI = &getAnalysis<RegionInfoPass>().getRegionInfo();

    LLVM_DEBUG(dbgs() << "PPCGCodeGen running on : " << getUniqueScopName(S)
                      << " | loop depth: " << S->getMaxLoopDepth() << "\n");

    // We currently do not support functions other than intrinsics inside
    // kernels, as code generation will need to offload function calls to the
    // kernel. This may lead to a kernel trying to call a function on the host.
    // This also allows us to prevent codegen from trying to take the
    // address of an intrinsic function to send to the kernel.
    if (containsInvalidKernelFunction(CurrentScop,
                                      Architecture == GPUArch::NVPTX64)) {
      LLVM_DEBUG(
          dbgs() << getUniqueScopName(S)
                 << " contains function which cannot be materialised in a GPU "
                    "kernel. Bailing out.\n";);
      return false;
    }

    auto PPCGScop = createPPCGScop();
    auto PPCGProg = createPPCGProg(PPCGScop);
    auto PPCGGen = generateGPU(PPCGScop, PPCGProg);

    if (PPCGGen->tree) {
      generateCode(isl_ast_node_copy(PPCGGen->tree), PPCGProg);
      CurrentScop.markAsToBeSkipped();
    } else {
      LLVM_DEBUG(dbgs() << getUniqueScopName(S)
                        << " has empty PPCGGen->tree. Bailing out.\n");
    }

    freeOptions(PPCGScop);
    freePPCGGen(PPCGGen);
    gpu_prog_free(PPCGProg);
    ppcg_scop_free(PPCGScop);

    return true;
  }
  void printScop(raw_ostream &, Scop &) const override {}
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    ScopPass::getAnalysisUsage(AU);

    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<RegionInfoPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<ScopDetectionWrapperPass>();
    AU.addRequired<ScopInfoRegionPass>();
    AU.addRequired<LoopInfoWrapperPass>();

    // FIXME: We do not yet add regions for the newly generated code to the
    //        region tree.
  }
};
} // namespace
char PPCGCodeGeneration::ID = 1;
Pass *polly::createPPCGCodeGenerationPass(GPUArch Arch, GPURuntime Runtime) {
  PPCGCodeGeneration *generator = new PPCGCodeGeneration();
  generator->Runtime = Runtime;
  generator->Architecture = Arch;
  return generator;
}
INITIALIZE_PASS_BEGIN(PPCGCodeGeneration, "polly-codegen-ppcg",
                      "Polly - Apply PPCG translation to SCOP", false, false)
INITIALIZE_PASS_DEPENDENCY(DependenceInfo);
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
INITIALIZE_PASS_END(PPCGCodeGeneration, "polly-codegen-ppcg",
                    "Polly - Apply PPCG translation to SCOP", false, false)