//===--- CGStmtOpenMP.cpp - Emit LLVM Code from Statements ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit OpenMP nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtOpenMP.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Debug.h"
#include <optional>
using namespace clang;
using namespace CodeGen;
using namespace llvm::omp;

#define TTL_CODEGEN_TYPE "target-teams-loop-codegen"

static const VarDecl *getBaseDecl(const Expr *Ref);
static OpenMPDirectiveKind
getEffectiveDirectiveKind(const OMPExecutableDirective &S);

namespace {
/// Lexical scope for OpenMP executable constructs that handles correct codegen
/// for captured expressions.
class OMPLexicalScope : public CodeGenFunction::LexicalScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      }
    }
  }
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (isa_and_nonnull<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPLexicalScope(
      CodeGenFunction &CGF, const OMPExecutableDirective &S,
      const std::optional<OpenMPDirectiveKind> CapturedRegion = std::nullopt,
      const bool EmitPreInitStmt = true)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    if (EmitPreInitStmt)
      emitPreInitStmt(CGF, S);
    if (!CapturedRegion)
      return;
    assert(S.hasAssociatedStmt() &&
           "Expected associated statement for inlined directive.");
    const CapturedStmt *CS = S.getCapturedStmt(*CapturedRegion);
    for (const auto &C : CS->captures()) {
      if (C.capturesVariable() || C.capturesVariableByCopy()) {
        auto *VD = C.getCapturedVar();
        assert(VD == VD->getCanonicalDecl() &&
               "Canonical decl must be captured.");
        DeclRefExpr DRE(
            CGF.getContext(), const_cast<VarDecl *>(VD),
            isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
                                       InlinedShareds.isGlobalVarCaptured(VD)),
            VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
        InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress());
      }
    }
    (void)InlinedShareds.Privatize();
  }
};
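
// As an illustration (a rough sketch, not normative): for a directive such as
//   #pragma omp parallel num_threads(n + 2)
// Sema wraps 'n + 2' in an OMPCapturedExprDecl attached to the clause's
// pre-init statement; emitPreInitStmt() above materializes it as a local
// temporary, so the clause operand is evaluated once before the outlined
// region is entered.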

/// Lexical scope for the OpenMP parallel construct that handles correct
/// codegen for captured expressions.
class OMPParallelScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind EKind = getEffectiveDirectiveKind(S);
    return !(isOpenMPTargetExecutionDirective(EKind) ||
             isOpenMPLoopBoundSharingDirective(EKind)) &&
           isOpenMPParallelDirective(EKind);
  }

public:
  OMPParallelScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/std::nullopt,
                        EmitPreInitStmt(S)) {}
};

/// Lexical scope for the OpenMP teams construct that handles correct codegen
/// for captured expressions.
class OMPTeamsScope final : public OMPLexicalScope {
  bool EmitPreInitStmt(const OMPExecutableDirective &S) {
    OpenMPDirectiveKind EKind = getEffectiveDirectiveKind(S);
    return !isOpenMPTargetExecutionDirective(EKind) &&
           isOpenMPTeamsDirective(EKind);
  }

public:
  OMPTeamsScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : OMPLexicalScope(CGF, S, /*CapturedRegion=*/std::nullopt,
                        EmitPreInitStmt(S)) {}
};

/// Private scope for OpenMP loop-based directives that supports capturing
/// expressions used in the loop statement.
class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
  void emitPreInitStmt(CodeGenFunction &CGF, const OMPLoopBasedDirective &S) {
    const Stmt *PreInits;
    CodeGenFunction::OMPMapVars PreCondVars;
    if (auto *LD = dyn_cast<OMPLoopDirective>(&S)) {
      llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
      for (const auto *E : LD->counters()) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
        EmittedAsPrivate.insert(VD->getCanonicalDecl());
        (void)PreCondVars.setVarAddr(
            CGF, VD, CGF.CreateMemTemp(VD->getType().getNonReferenceType()));
      }
      // Mark private vars as undefs.
      for (const auto *C : LD->getClausesOfKind<OMPPrivateClause>()) {
        for (const Expr *IRef : C->varlist()) {
          const auto *OrigVD =
              cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
          if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
            QualType OrigVDTy = OrigVD->getType().getNonReferenceType();
            (void)PreCondVars.setVarAddr(
                CGF, OrigVD,
                Address(llvm::UndefValue::get(CGF.ConvertTypeForMem(
                            CGF.getContext().getPointerType(OrigVDTy))),
                        CGF.ConvertTypeForMem(OrigVDTy),
                        CGF.getContext().getDeclAlign(OrigVD)));
          }
        }
      }
      (void)PreCondVars.apply(CGF);
      // Emit init, __range and __end variables for C++ range loops.
      (void)OMPLoopBasedDirective::doForAllLoops(
          LD->getInnermostCapturedStmt()->getCapturedStmt(),
          /*TryImperfectlyNestedLoops=*/true, LD->getLoopsNumber(),
          [&CGF](unsigned Cnt, const Stmt *CurStmt) {
            if (const auto *CXXFor = dyn_cast<CXXForRangeStmt>(CurStmt)) {
              if (const Stmt *Init = CXXFor->getInit())
                CGF.EmitStmt(Init);
              CGF.EmitStmt(CXXFor->getRangeStmt());
              CGF.EmitStmt(CXXFor->getEndStmt());
            }
            return false;
          });
      PreInits = LD->getPreInits();
    } else if (const auto *Tile = dyn_cast<OMPTileDirective>(&S)) {
      PreInits = Tile->getPreInits();
    } else if (const auto *Unroll = dyn_cast<OMPUnrollDirective>(&S)) {
      PreInits = Unroll->getPreInits();
    } else if (const auto *Reverse = dyn_cast<OMPReverseDirective>(&S)) {
      PreInits = Reverse->getPreInits();
    } else if (const auto *Interchange =
                   dyn_cast<OMPInterchangeDirective>(&S)) {
      PreInits = Interchange->getPreInits();
    } else {
      llvm_unreachable("Unknown loop-based directive kind.");
    }
    if (PreInits) {
      // CompoundStmts and DeclStmts are used as lists of PreInit statements
      // and declarations. Since declarations must be visible to the statements
      // that follow and use them, unpack the CompoundStmt they are nested in.
      SmallVector<const Stmt *> PreInitStmts;
      if (auto *PreInitCompound = dyn_cast<CompoundStmt>(PreInits))
        llvm::append_range(PreInitStmts, PreInitCompound->body());
      else
        PreInitStmts.push_back(PreInits);

      for (const Stmt *S : PreInitStmts) {
        // EmitStmt skips any OMPCapturedExprDecls, but they need to be
        // emitted here.
        if (auto *PreInitDecl = dyn_cast<DeclStmt>(S)) {
          for (Decl *I : PreInitDecl->decls())
            CGF.EmitVarDecl(cast<VarDecl>(*I));
          continue;
        }
        CGF.EmitStmt(S);
      }
    }
    PreCondVars.restore(CGF);
  }

public:
  OMPLoopScope(CodeGenFunction &CGF, const OMPLoopBasedDirective &S)
      : CodeGenFunction::RunCleanupsScope(CGF) {
    emitPreInitStmt(CGF, S);
  }
};
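
// For loop-transformation directives the pre-inits carry the helper
// declarations the transformation introduced. A rough sketch: for
//   #pragma omp tile sizes(4)
//   for (int i = 0; i < n; ++i) ...
// Sema records the helper declarations it generated (e.g. captured trip-count
// expressions) as PreInits, and OMPLoopScope emits them here before the
// transformed loop nest is code-generated.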

class OMPSimdLexicalScope : public CodeGenFunction::LexicalScope {
  CodeGenFunction::OMPPrivateScope InlinedShareds;

  static bool isCapturedVar(CodeGenFunction &CGF, const VarDecl *VD) {
    return CGF.LambdaCaptureFields.lookup(VD) ||
           (CGF.CapturedStmtInfo && CGF.CapturedStmtInfo->lookup(VD)) ||
           (isa_and_nonnull<BlockDecl>(CGF.CurCodeDecl) &&
            cast<BlockDecl>(CGF.CurCodeDecl)->capturesVariable(VD));
  }

public:
  OMPSimdLexicalScope(CodeGenFunction &CGF, const OMPExecutableDirective &S)
      : CodeGenFunction::LexicalScope(CGF, S.getSourceRange()),
        InlinedShareds(CGF) {
    for (const auto *C : S.clauses()) {
      if (const auto *CPI = OMPClauseWithPreInit::get(C)) {
        if (const auto *PreInit =
                cast_or_null<DeclStmt>(CPI->getPreInitStmt())) {
          for (const auto *I : PreInit->decls()) {
            if (!I->hasAttr<OMPCaptureNoInitAttr>()) {
              CGF.EmitVarDecl(cast<VarDecl>(*I));
            } else {
              CodeGenFunction::AutoVarEmission Emission =
                  CGF.EmitAutoVarAlloca(cast<VarDecl>(*I));
              CGF.EmitAutoVarCleanups(Emission);
            }
          }
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDevicePtrClause>(C)) {
        for (const Expr *E : UDP->varlist()) {
          const Decl *D = cast<DeclRefExpr>(E)->getDecl();
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      } else if (const auto *UDP = dyn_cast<OMPUseDeviceAddrClause>(C)) {
        for (const Expr *E : UDP->varlist()) {
          const Decl *D = getBaseDecl(E);
          if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
            CGF.EmitVarDecl(*OED);
        }
      }
    }
    if (!isOpenMPSimdDirective(getEffectiveDirectiveKind(S)))
      CGF.EmitOMPPrivateClause(S, InlinedShareds);
    if (const auto *TG = dyn_cast<OMPTaskgroupDirective>(&S)) {
      if (const Expr *E = TG->getReductionRef())
        CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl()));
    }
    // Temp copy arrays for inscan reductions should not be emitted, as they
    // are not used in simd-only mode.
    llvm::DenseSet<CanonicalDeclPtr<const Decl>> CopyArrayTemps;
    for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
      if (C->getModifier() != OMPC_REDUCTION_inscan)
        continue;
      for (const Expr *E : C->copy_array_temps())
        CopyArrayTemps.insert(cast<DeclRefExpr>(E)->getDecl());
    }
    const auto *CS = cast_or_null<CapturedStmt>(S.getAssociatedStmt());
    while (CS) {
      for (auto &C : CS->captures()) {
        if (C.capturesVariable() || C.capturesVariableByCopy()) {
          auto *VD = C.getCapturedVar();
          if (CopyArrayTemps.contains(VD))
            continue;
          assert(VD == VD->getCanonicalDecl() &&
                 "Canonical decl must be captured.");
          DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(VD),
                          isCapturedVar(CGF, VD) ||
                              (CGF.CapturedStmtInfo &&
                               InlinedShareds.isGlobalVarCaptured(VD)),
                          VD->getType().getNonReferenceType(), VK_LValue,
                          C.getLocation());
          InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress());
        }
      }
      CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
    }
    (void)InlinedShareds.Privatize();
  }
};

} // namespace

// The loop directive with a bind clause will be mapped to a different
// directive with corresponding semantics.
static OpenMPDirectiveKind
getEffectiveDirectiveKind(const OMPExecutableDirective &S) {
  OpenMPDirectiveKind Kind = S.getDirectiveKind();
  if (Kind != OMPD_loop)
    return Kind;

  OpenMPBindClauseKind BindKind = OMPC_BIND_unknown;
  if (const auto *C = S.getSingleClause<OMPBindClause>())
    BindKind = C->getBindKind();

  switch (BindKind) {
  case OMPC_BIND_parallel:
    return OMPD_for;
  case OMPC_BIND_teams:
    return OMPD_distribute;
  case OMPC_BIND_thread:
    return OMPD_simd;
  default:
    return OMPD_loop;
  }
}
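
// In effect (a summary of the switch above):
//   #pragma omp loop bind(parallel)  is code-generated like '#pragma omp for',
//   #pragma omp loop bind(teams)     like '#pragma omp distribute', and
//   #pragma omp loop bind(thread)    like '#pragma omp simd';
// without a bind clause the directive keeps OMPD_loop semantics.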

static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
                                         const OMPExecutableDirective &S,
                                         const RegionCodeGenTy &CodeGen);

LValue CodeGenFunction::EmitOMPSharedLValue(const Expr *E) {
  if (const auto *OrigDRE = dyn_cast<DeclRefExpr>(E)) {
    if (const auto *OrigVD = dyn_cast<VarDecl>(OrigDRE->getDecl())) {
      OrigVD = OrigVD->getCanonicalDecl();
      bool IsCaptured =
          LambdaCaptureFields.lookup(OrigVD) ||
          (CapturedStmtInfo && CapturedStmtInfo->lookup(OrigVD)) ||
          (isa_and_nonnull<BlockDecl>(CurCodeDecl));
      DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD), IsCaptured,
                      OrigDRE->getType(), VK_LValue, OrigDRE->getExprLoc());
      return EmitLValue(&DRE);
    }
  }
  return EmitLValue(E);
}

llvm::Value *CodeGenFunction::getTypeSize(QualType Ty) {
  ASTContext &C = getContext();
  llvm::Value *Size = nullptr;
  auto SizeInChars = C.getTypeSizeInChars(Ty);
  if (SizeInChars.isZero()) {
    // getTypeSizeInChars() returns 0 for a VLA.
    while (const VariableArrayType *VAT = C.getAsVariableArrayType(Ty)) {
      VlaSizePair VlaSize = getVLASize(VAT);
      Ty = VlaSize.Type;
      Size =
          Size ? Builder.CreateNUWMul(Size, VlaSize.NumElts) : VlaSize.NumElts;
    }
    SizeInChars = C.getTypeSizeInChars(Ty);
    if (SizeInChars.isZero())
      return llvm::ConstantInt::get(SizeTy, /*V=*/0);
    return Builder.CreateNUWMul(Size, CGM.getSize(SizeInChars));
  }
  return CGM.getSize(SizeInChars);
}
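
// Example (sketch): for a VLA declared as 'double a[n][m]', the loop above
// accumulates NumElts = n * m using NUW multiplies, and the returned size is
// NumElts * sizeof(double). For a fixed-size type the constant size from
// getTypeSizeInChars() is returned directly.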

void CodeGenFunction::GenerateOpenMPCapturedVars(
    const CapturedStmt &S, SmallVectorImpl<llvm::Value *> &CapturedVars) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  auto CurField = RD->field_begin();
  auto CurCap = S.captures().begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField, ++CurCap) {
    if (CurField->hasCapturedVLAType()) {
      const VariableArrayType *VAT = CurField->getCapturedVLAType();
      llvm::Value *Val = VLASizeMap[VAT->getSizeExpr()];
      CapturedVars.push_back(Val);
    } else if (CurCap->capturesThis()) {
      CapturedVars.push_back(CXXThisValue);
    } else if (CurCap->capturesVariableByCopy()) {
      llvm::Value *CV = EmitLoadOfScalar(EmitLValue(*I), CurCap->getLocation());

      // If the field is not a pointer, we need to save the actual value
      // and load it as a void pointer.
      if (!CurField->getType()->isAnyPointerType()) {
        ASTContext &Ctx = getContext();
        Address DstAddr = CreateMemTemp(
            Ctx.getUIntPtrType(),
            Twine(CurCap->getCapturedVar()->getName(), ".casted"));
        LValue DstLV = MakeAddrLValue(DstAddr, Ctx.getUIntPtrType());

        llvm::Value *SrcAddrVal = EmitScalarConversion(
            DstAddr.emitRawPointer(*this),
            Ctx.getPointerType(Ctx.getUIntPtrType()),
            Ctx.getPointerType(CurField->getType()), CurCap->getLocation());
        LValue SrcLV =
            MakeNaturalAlignAddrLValue(SrcAddrVal, CurField->getType());

        // Store the value using the source type pointer.
        EmitStoreThroughLValue(RValue::get(CV), SrcLV);

        // Load the value using the destination type pointer.
        CV = EmitLoadOfScalar(DstLV, CurCap->getLocation());
      }
      CapturedVars.push_back(CV);
    } else {
      assert(CurCap->capturesVariable() && "Expected capture by reference.");
      CapturedVars.push_back(EmitLValue(*I).getAddress().emitRawPointer(*this));
    }
  }
}
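
// A minimal sketch of the by-copy path above: a captured 'int x' is loaded,
// stored into a temporary alloca named 'x.casted' through an int-typed
// pointer view of that alloca, and re-loaded as uintptr, so the runtime,
// which only traffics in pointer-sized values, can forward it to the
// outlined function unchanged.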

static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
                                    QualType DstType, StringRef Name,
                                    LValue AddrLV) {
  ASTContext &Ctx = CGF.getContext();

  llvm::Value *CastedPtr = CGF.EmitScalarConversion(
      AddrLV.getAddress().emitRawPointer(CGF), Ctx.getUIntPtrType(),
      Ctx.getPointerType(DstType), Loc);
  // FIXME: should the pointee type (DstType) be passed?
  Address TmpAddr =
      CGF.MakeNaturalAlignAddrLValue(CastedPtr, DstType).getAddress();
  return TmpAddr;
}

static QualType getCanonicalParamType(ASTContext &C, QualType T) {
  if (T->isLValueReferenceType())
    return C.getLValueReferenceType(
        getCanonicalParamType(C, T.getNonReferenceType()),
        /*SpelledAsLValue=*/false);
  if (T->isPointerType())
    return C.getPointerType(getCanonicalParamType(C, T->getPointeeType()));
  if (const ArrayType *A = T->getAsArrayTypeUnsafe()) {
    if (const auto *VLA = dyn_cast<VariableArrayType>(A))
      return getCanonicalParamType(C, VLA->getElementType());
    if (!A->isVariablyModifiedType())
      return C.getCanonicalType(T);
  }
  return C.getCanonicalParamType(T);
}

namespace {
/// Contains required data for proper outlined function codegen.
struct FunctionOptions {
  /// Captured statement for which the function is generated.
  const CapturedStmt *S = nullptr;
  /// true if cast to/from UIntPtr is required for variables captured by
  /// value.
  const bool UIntPtrCastRequired = true;
  /// true if only casted arguments must be registered as local args or VLA
  /// sizes.
  const bool RegisterCastedArgsOnly = false;
  /// Name of the generated function.
  const StringRef FunctionName;
  /// Location of the non-debug version of the outlined function.
  SourceLocation Loc;
  explicit FunctionOptions(const CapturedStmt *S, bool UIntPtrCastRequired,
                           bool RegisterCastedArgsOnly, StringRef FunctionName,
                           SourceLocation Loc)
      : S(S), UIntPtrCastRequired(UIntPtrCastRequired),
        RegisterCastedArgsOnly(UIntPtrCastRequired && RegisterCastedArgsOnly),
        FunctionName(FunctionName), Loc(Loc) {}
};
} // namespace

static llvm::Function *emitOutlinedFunctionPrologue(
    CodeGenFunction &CGF, FunctionArgList &Args,
    llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>>
        &LocalAddrs,
    llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>>
        &VLASizes,
    llvm::Value *&CXXThisValue, const FunctionOptions &FO) {
  const CapturedDecl *CD = FO.S->getCapturedDecl();
  const RecordDecl *RD = FO.S->getCapturedRecordDecl();
  assert(CD->hasBody() && "missing CapturedDecl body");

  CXXThisValue = nullptr;
  // Build the argument list.
  CodeGenModule &CGM = CGF.CGM;
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList TargetArgs;
  Args.append(CD->param_begin(),
              std::next(CD->param_begin(), CD->getContextParamPosition()));
  TargetArgs.append(
      CD->param_begin(),
      std::next(CD->param_begin(), CD->getContextParamPosition()));
  auto I = FO.S->captures().begin();
  FunctionDecl *DebugFunctionDecl = nullptr;
  if (!FO.UIntPtrCastRequired) {
    FunctionProtoType::ExtProtoInfo EPI;
    QualType FunctionTy = Ctx.getFunctionType(Ctx.VoidTy, {}, EPI);
    DebugFunctionDecl = FunctionDecl::Create(
        Ctx, Ctx.getTranslationUnitDecl(), FO.S->getBeginLoc(),
        SourceLocation(), DeclarationName(), FunctionTy,
        Ctx.getTrivialTypeSourceInfo(FunctionTy), SC_Static,
        /*UsesFPIntrin=*/false, /*isInlineSpecified=*/false,
        /*hasWrittenPrototype=*/false);
  }
  for (const FieldDecl *FD : RD->fields()) {
    QualType ArgType = FD->getType();
    IdentifierInfo *II = nullptr;
    VarDecl *CapVar = nullptr;

    // If this is a capture by copy and the type is not a pointer, the outlined
    // function argument type should be uintptr and the value properly casted
    // to uintptr. This is necessary given that the runtime library is only
    // able to deal with pointers. VLA type sizes are passed to the outlined
    // function in the same way.
    if (FO.UIntPtrCastRequired &&
        ((I->capturesVariableByCopy() && !ArgType->isAnyPointerType()) ||
         I->capturesVariableArrayType()))
      ArgType = Ctx.getUIntPtrType();

    if (I->capturesVariable() || I->capturesVariableByCopy()) {
      CapVar = I->getCapturedVar();
      II = CapVar->getIdentifier();
    } else if (I->capturesThis()) {
      II = &Ctx.Idents.get("this");
    } else {
      assert(I->capturesVariableArrayType());
      II = &Ctx.Idents.get("vla");
    }
    if (ArgType->isVariablyModifiedType())
      ArgType = getCanonicalParamType(Ctx, ArgType);
    VarDecl *Arg;
    if (CapVar && (CapVar->getTLSKind() != clang::VarDecl::TLS_None)) {
      Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
                                      II, ArgType,
                                      ImplicitParamKind::ThreadPrivateVar);
    } else if (DebugFunctionDecl && (CapVar || I->capturesThis())) {
      Arg = ParmVarDecl::Create(
          Ctx, DebugFunctionDecl,
          CapVar ? CapVar->getBeginLoc() : FD->getBeginLoc(),
          CapVar ? CapVar->getLocation() : FD->getLocation(), II, ArgType,
          /*TInfo=*/nullptr, SC_None, /*DefArg=*/nullptr);
    } else {
      Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
                                      II, ArgType, ImplicitParamKind::Other);
    }
    Args.emplace_back(Arg);
    // Do not cast arguments if we emit function with non-original types.
    TargetArgs.emplace_back(
        FO.UIntPtrCastRequired
            ? Arg
            : CGM.getOpenMPRuntime().translateParameter(FD, Arg));
    ++I;
  }
  Args.append(std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
              CD->param_end());
  TargetArgs.append(
      std::next(CD->param_begin(), CD->getContextParamPosition() + 1),
      CD->param_end());

  // Create the function declaration.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, TargetArgs);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  auto *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             FO.FunctionName, &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->setDoesNotThrow();
  F->setDoesNotRecurse();

  // Always inline the outlined function if optimizations are enabled.
  if (CGM.getCodeGenOpts().OptimizationLevel != 0) {
    F->removeFnAttr(llvm::Attribute::NoInline);
    F->addFnAttr(llvm::Attribute::AlwaysInline);
  }

  // Generate the function.
  CGF.StartFunction(CD, Ctx.VoidTy, F, FuncInfo, TargetArgs,
                    FO.UIntPtrCastRequired ? FO.Loc : FO.S->getBeginLoc(),
                    FO.UIntPtrCastRequired ? FO.Loc
                                           : CD->getBody()->getBeginLoc());
  unsigned Cnt = CD->getContextParamPosition();
  I = FO.S->captures().begin();
  for (const FieldDecl *FD : RD->fields()) {
    // Do not map arguments if we emit function with non-original types.
    Address LocalAddr(Address::invalid());
    if (!FO.UIntPtrCastRequired && Args[Cnt] != TargetArgs[Cnt]) {
      LocalAddr = CGM.getOpenMPRuntime().getParameterAddress(CGF, Args[Cnt],
                                                             TargetArgs[Cnt]);
    } else {
      LocalAddr = CGF.GetAddrOfLocalVar(Args[Cnt]);
    }
    // If we are capturing a pointer by copy we don't need to do anything, just
    // use the value that we get from the arguments.
    if (I->capturesVariableByCopy() && FD->getType()->isAnyPointerType()) {
      const VarDecl *CurVD = I->getCapturedVar();
      if (!FO.RegisterCastedArgsOnly)
        LocalAddrs.insert({Args[Cnt], {CurVD, LocalAddr}});
      ++Cnt;
      ++I;
      continue;
    }

    LValue ArgLVal = CGF.MakeAddrLValue(LocalAddr, Args[Cnt]->getType(),
                                        AlignmentSource::Decl);
    if (FD->hasCapturedVLAType()) {
      if (FO.UIntPtrCastRequired) {
        ArgLVal = CGF.MakeAddrLValue(
            castValueFromUintptr(CGF, I->getLocation(), FD->getType(),
                                 Args[Cnt]->getName(), ArgLVal),
            FD->getType(), AlignmentSource::Decl);
      }
      llvm::Value *ExprArg = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      const VariableArrayType *VAT = FD->getCapturedVLAType();
      VLASizes.try_emplace(Args[Cnt], VAT->getSizeExpr(), ExprArg);
    } else if (I->capturesVariable()) {
      const VarDecl *Var = I->getCapturedVar();
      QualType VarTy = Var->getType();
      Address ArgAddr = ArgLVal.getAddress();
      if (ArgLVal.getType()->isLValueReferenceType()) {
        ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
      } else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
        assert(ArgLVal.getType()->isPointerType());
        ArgAddr = CGF.EmitLoadOfPointer(
            ArgAddr, ArgLVal.getType()->castAs<PointerType>());
      }
      if (!FO.RegisterCastedArgsOnly) {
        LocalAddrs.insert(
            {Args[Cnt], {Var, ArgAddr.withAlignment(Ctx.getDeclAlign(Var))}});
      }
    } else if (I->capturesVariableByCopy()) {
      assert(!FD->getType()->isAnyPointerType() &&
             "Not expecting a captured pointer.");
      const VarDecl *Var = I->getCapturedVar();
      LocalAddrs.insert({Args[Cnt],
                         {Var, FO.UIntPtrCastRequired
                                   ? castValueFromUintptr(
                                         CGF, I->getLocation(), FD->getType(),
                                         Args[Cnt]->getName(), ArgLVal)
                                   : ArgLVal.getAddress()}});
    } else {
      // If 'this' is captured, load it into CXXThisValue.
      assert(I->capturesThis());
      CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
      LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress()}});
    }
    ++Cnt;
    ++I;
  }

  return F;
}
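
// Shape of the result (a rough sketch with a hypothetical capture set): for a
// region capturing 'int x' by copy, 'S &s' by reference, and a VLA
// 'double a[n]', the prologue with UIntPtrCastRequired produces roughly
//   void <name>(<context params...>, uintptr_t x, S *s, uintptr_t vla,
//               double *a);
// while the debug variant keeps the original parameter types so the debugger
// sees natural declarations.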

llvm::Function *
CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
                                                    SourceLocation Loc) {
  assert(
      CapturedStmtInfo &&
      "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  // Build the argument list.
  bool NeedWrapperFunction =
      getDebugInfo() && CGM.getCodeGenOpts().hasReducedDebugInfo();
  FunctionArgList Args, WrapperArgs;
  llvm::MapVector<const Decl *, std::pair<const VarDecl *, Address>> LocalAddrs,
      WrapperLocalAddrs;
  llvm::DenseMap<const Decl *, std::pair<const Expr *, llvm::Value *>> VLASizes,
      WrapperVLASizes;
  SmallString<256> Buffer;
  llvm::raw_svector_ostream Out(Buffer);
  Out << CapturedStmtInfo->getHelperName();

  CodeGenFunction WrapperCGF(CGM, /*suppressNewContext=*/true);
  llvm::Function *WrapperF = nullptr;
  if (NeedWrapperFunction) {
    // Emit the final kernel early to allow attributes to be added by the
    // OpenMP-IR-Builder.
    FunctionOptions WrapperFO(&S, /*UIntPtrCastRequired=*/true,
                              /*RegisterCastedArgsOnly=*/true,
                              CapturedStmtInfo->getHelperName(), Loc);
    WrapperCGF.CapturedStmtInfo = CapturedStmtInfo;
    WrapperF =
        emitOutlinedFunctionPrologue(WrapperCGF, Args, LocalAddrs, VLASizes,
                                     WrapperCGF.CXXThisValue, WrapperFO);
    Out << "_debug__";
  }
  FunctionOptions FO(&S, !NeedWrapperFunction, /*RegisterCastedArgsOnly=*/false,
                     Out.str(), Loc);
  llvm::Function *F = emitOutlinedFunctionPrologue(
      *this, WrapperArgs, WrapperLocalAddrs, WrapperVLASizes, CXXThisValue, FO);
  CodeGenFunction::OMPPrivateScope LocalScope(*this);
  for (const auto &LocalAddrPair : WrapperLocalAddrs) {
    if (LocalAddrPair.second.first) {
      LocalScope.addPrivate(LocalAddrPair.second.first,
                            LocalAddrPair.second.second);
    }
  }
  (void)LocalScope.Privatize();
  for (const auto &VLASizePair : WrapperVLASizes)
    VLASizeMap[VLASizePair.second.first] = VLASizePair.second.second;
  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  (void)LocalScope.ForceCleanup();
  FinishFunction(CD->getBodyRBrace());
  if (!NeedWrapperFunction)
    return F;

  // Reverse the order so the wrapper follows the debug function in the module.
  WrapperF->removeFromParent();
  F->getParent()->getFunctionList().insertAfter(F->getIterator(), WrapperF);

  llvm::SmallVector<llvm::Value *, 4> CallArgs;
  auto *PI = F->arg_begin();
  for (const auto *Arg : Args) {
    llvm::Value *CallArg;
    auto I = LocalAddrs.find(Arg);
    if (I != LocalAddrs.end()) {
      LValue LV = WrapperCGF.MakeAddrLValue(
          I->second.second,
          I->second.first ? I->second.first->getType() : Arg->getType(),
          AlignmentSource::Decl);
      if (LV.getType()->isAnyComplexType())
        LV.setAddress(LV.getAddress().withElementType(PI->getType()));
      CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
    } else {
      auto EI = VLASizes.find(Arg);
      if (EI != VLASizes.end()) {
        CallArg = EI->second.second;
      } else {
        LValue LV =
            WrapperCGF.MakeAddrLValue(WrapperCGF.GetAddrOfLocalVar(Arg),
                                      Arg->getType(), AlignmentSource::Decl);
        CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
      }
    }
    CallArgs.emplace_back(WrapperCGF.EmitFromMemory(CallArg, Arg->getType()));
    ++PI;
  }
  CGM.getOpenMPRuntime().emitOutlinedFunctionCall(WrapperCGF, Loc, F, CallArgs);
  WrapperCGF.FinishFunction();
  return WrapperF;
}
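
// With debug info enabled, two functions are emitted (sketch):
//   <helper>_debug__  - body of the captured statement, original param types;
//   <helper>          - the runtime-facing wrapper taking the uintptr-cast
//                       arguments, which loads each argument and calls
//                       <helper>_debug__.
// Without debug info only the single uintptr-based <helper> is emitted.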

//===----------------------------------------------------------------------===//
//                              OpenMP Directive Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitOMPAggregateAssign(
    Address DestAddr, Address SrcAddr, QualType OriginalType,
    const llvm::function_ref<void(Address, Address)> CopyGen) {
  // Perform element-by-element initialization.
  QualType ElementTy;

  // Drill down to the base element type on both arrays.
  const ArrayType *ArrayTy = OriginalType->getAsArrayTypeUnsafe();
  llvm::Value *NumElements = emitArrayLength(ArrayTy, ElementTy, DestAddr);
  SrcAddr = SrcAddr.withElementType(DestAddr.getElementType());

  llvm::Value *SrcBegin = SrcAddr.emitRawPointer(*this);
  llvm::Value *DestBegin = DestAddr.emitRawPointer(*this);
  // Cast from pointer to array type to pointer to single element.
  llvm::Value *DestEnd = Builder.CreateInBoundsGEP(DestAddr.getElementType(),
                                                   DestBegin, NumElements);

  // The basic structure here is a while-do loop.
  llvm::BasicBlock *BodyBB = createBasicBlock("omp.arraycpy.body");
  llvm::BasicBlock *DoneBB = createBasicBlock("omp.arraycpy.done");
  llvm::Value *IsEmpty =
      Builder.CreateICmpEQ(DestBegin, DestEnd, "omp.arraycpy.isempty");
  Builder.CreateCondBr(IsEmpty, DoneBB, BodyBB);

  // Enter the loop body, making that address the current address.
  llvm::BasicBlock *EntryBB = Builder.GetInsertBlock();
  EmitBlock(BodyBB);

  CharUnits ElementSize = getContext().getTypeSizeInChars(ElementTy);

  llvm::PHINode *SrcElementPHI =
      Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
  SrcElementPHI->addIncoming(SrcBegin, EntryBB);
  Address SrcElementCurrent =
      Address(SrcElementPHI, SrcAddr.getElementType(),
              SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  llvm::PHINode *DestElementPHI = Builder.CreatePHI(
      DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
  DestElementPHI->addIncoming(DestBegin, EntryBB);
  Address DestElementCurrent =
      Address(DestElementPHI, DestAddr.getElementType(),
              DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));

  // Emit copy.
  CopyGen(DestElementCurrent, SrcElementCurrent);

  // Shift the address forward by one element.
  llvm::Value *DestElementNext =
      Builder.CreateConstGEP1_32(DestAddr.getElementType(), DestElementPHI,
                                 /*Idx0=*/1, "omp.arraycpy.dest.element");
  llvm::Value *SrcElementNext =
      Builder.CreateConstGEP1_32(SrcAddr.getElementType(), SrcElementPHI,
                                 /*Idx0=*/1, "omp.arraycpy.src.element");
  // Check whether we've reached the end.
  llvm::Value *Done =
      Builder.CreateICmpEQ(DestElementNext, DestEnd, "omp.arraycpy.done");
  Builder.CreateCondBr(Done, DoneBB, BodyBB);
  DestElementPHI->addIncoming(DestElementNext, Builder.GetInsertBlock());
  SrcElementPHI->addIncoming(SrcElementNext, Builder.GetInsertBlock());

  // Done.
  EmitBlock(DoneBB, /*IsFinished=*/true);
}
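
// The emitted copy loop has this basic-block structure (sketch):
//   entry:             br (dest.begin == dest.end), omp.arraycpy.done,
//                                                   omp.arraycpy.body
//   omp.arraycpy.body: src/dest element PHIs; CopyGen(dest, src);
//                      advance both pointers by one element;
//                      br (dest.next == dest.end), omp.arraycpy.done,
//                                                  omp.arraycpy.body
//   omp.arraycpy.done: continue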

void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
                                  Address SrcAddr, const VarDecl *DestVD,
                                  const VarDecl *SrcVD, const Expr *Copy) {
  if (OriginalType->isArrayType()) {
    const auto *BO = dyn_cast<BinaryOperator>(Copy);
    if (BO && BO->getOpcode() == BO_Assign) {
      // Perform simple memcpy for simple copying.
      LValue Dest = MakeAddrLValue(DestAddr, OriginalType);
      LValue Src = MakeAddrLValue(SrcAddr, OriginalType);
      EmitAggregateAssign(Dest, Src, OriginalType);
    } else {
      // For arrays with complex element types perform element by element
      // copying.
      EmitOMPAggregateAssign(
          DestAddr, SrcAddr, OriginalType,
          [this, Copy, SrcVD, DestVD](Address DestElement, Address SrcElement) {
            // Working with the single array element, so have to remap
            // destination and source variables to corresponding array
            // elements.
            CodeGenFunction::OMPPrivateScope Remap(*this);
            Remap.addPrivate(DestVD, DestElement);
            Remap.addPrivate(SrcVD, SrcElement);
            (void)Remap.Privatize();
            EmitIgnoredExpr(Copy);
          });
    }
  } else {
    // Remap pseudo source variable to private copy.
    CodeGenFunction::OMPPrivateScope Remap(*this);
    Remap.addPrivate(SrcVD, SrcAddr);
    Remap.addPrivate(DestVD, DestAddr);
    (void)Remap.Privatize();
    // Emit copying of the whole variable.
    EmitIgnoredExpr(Copy);
  }
}

bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
                                                OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  OpenMPDirectiveKind EKind = getEffectiveDirectiveKind(D);
  bool DeviceConstTarget = getLangOpts().OpenMPIsTargetDevice &&
                           isOpenMPTargetExecutionDirective(EKind);
  bool FirstprivateIsLastprivate = false;
  llvm::DenseMap<const VarDecl *, OpenMPLastprivateModifier> Lastprivates;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    for (const auto *D : C->varlist())
      Lastprivates.try_emplace(
          cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl(),
          C->getKind());
  }
  llvm::DenseSet<const VarDecl *> EmittedAsFirstprivate;
  llvm::SmallVector<OpenMPDirectiveKind, 4> CaptureRegions;
  getOpenMPCaptureRegions(CaptureRegions, EKind);
  // Force emission of the firstprivate copy if the directive does not emit an
  // outlined function (e.g. omp for, omp simd, omp distribute).
  bool MustEmitFirstprivateCopy =
      CaptureRegions.size() == 1 && CaptureRegions.back() == OMPD_unknown;
  for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
    const auto *IRef = C->varlist_begin();
    const auto *InitsRef = C->inits().begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      bool ThisFirstprivateIsLastprivate =
          Lastprivates.count(OrigVD->getCanonicalDecl()) > 0;
      const FieldDecl *FD = CapturedStmtInfo->lookup(OrigVD);
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
      if (!MustEmitFirstprivateCopy && !ThisFirstprivateIsLastprivate && FD &&
          !FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
        ++IRef;
        ++InitsRef;
        continue;
      }
      // Do not emit copy for firstprivate constant variables in target
      // regions that are captured by reference.
      if (DeviceConstTarget && OrigVD->getType().isConstant(getContext()) &&
          FD && FD->getType()->isReferenceType() &&
          (!VD || !VD->hasAttr<OMPAllocateDeclAttr>())) {
        EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl());
        ++IRef;
        ++InitsRef;
        continue;
      }
      FirstprivateIsLastprivate =
          FirstprivateIsLastprivate || ThisFirstprivateIsLastprivate;
      if (EmittedAsFirstprivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VDInit =
            cast<VarDecl>(cast<DeclRefExpr>(*InitsRef)->getDecl());
        bool IsRegistered;
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/FD != nullptr,
                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        LValue OriginalLVal;
        if (!FD) {
          // Check if the firstprivate variable is just a constant value.
          ConstantEmission CE = tryEmitAsConstant(&DRE);
          if (CE && !CE.isReference()) {
            // Constant value, no need to create a copy.
            ++IRef;
            ++InitsRef;
            continue;
          }
          if (CE && CE.isReference()) {
            OriginalLVal = CE.getReferenceLValue(*this, &DRE);
          } else {
            assert(!CE && "Expected non-constant firstprivate.");
            OriginalLVal = EmitLValue(&DRE);
          }
        } else {
          OriginalLVal = EmitLValue(&DRE);
        }
        QualType Type = VD->getType();
        if (Type->isArrayType()) {
          // Emit VarDecl with copy init for arrays.
          // Get the address of the original variable captured in current
          // captured region.
          AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
          const Expr *Init = VD->getInit();
          if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
            // Perform simple memcpy.
            LValue Dest = MakeAddrLValue(Emission.getAllocatedAddress(), Type);
            EmitAggregateAssign(Dest, OriginalLVal, Type);
          } else {
            EmitOMPAggregateAssign(
                Emission.getAllocatedAddress(), OriginalLVal.getAddress(), Type,
                [this, VDInit, Init](Address DestElement, Address SrcElement) {
                  // Clean up any temporaries needed by the
                  // initialization.
                  RunCleanupsScope InitScope(*this);
                  // Emit initialization for single element.
                  setAddrOfLocalVar(VDInit, SrcElement);
                  EmitAnyExprToMem(Init, DestElement,
                                   Init->getType().getQualifiers(),
                                   /*IsInitializer*/ false);
                  LocalDeclMap.erase(VDInit);
                });
          }
          EmitAutoVarCleanups(Emission);
          IsRegistered =
              PrivateScope.addPrivate(OrigVD, Emission.getAllocatedAddress());
        } else {
          Address OriginalAddr = OriginalLVal.getAddress();
          // Emit private VarDecl with copy init.
          // Remap temp VDInit variable to the address of the original
          // variable (for proper handling of captured global variables).
          setAddrOfLocalVar(VDInit, OriginalAddr);
          EmitDecl(*VD);
          LocalDeclMap.erase(VDInit);
          Address VDAddr = GetAddrOfLocalVar(VD);
          if (ThisFirstprivateIsLastprivate &&
              Lastprivates[OrigVD->getCanonicalDecl()] ==
                  OMPC_LASTPRIVATE_conditional) {
            // Create/init special variable for lastprivate conditionals.
            llvm::Value *V =
                EmitLoadOfScalar(MakeAddrLValue(VDAddr, (*IRef)->getType(),
                                                AlignmentSource::Decl),
                                 (*IRef)->getExprLoc());
            VDAddr = CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                *this, OrigVD);
            EmitStoreOfScalar(V, MakeAddrLValue(VDAddr, (*IRef)->getType(),
                                                AlignmentSource::Decl));
            LocalDeclMap.erase(VD);
            setAddrOfLocalVar(VD, VDAddr);
          }
          IsRegistered = PrivateScope.addPrivate(OrigVD, VDAddr);
        }
        assert(IsRegistered &&
               "firstprivate var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
      ++InitsRef;
    }
  }
  return FirstprivateIsLastprivate && !EmittedAsFirstprivate.empty();
}
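
// Semantics implemented above, in outline: for
//   #pragma omp parallel firstprivate(a)   // 'a' an array of class type
// a private alloca is created and initialized element by element from the
// original 'a' via EmitOMPAggregateAssign, with the init expression's helper
// variable (VDInit) remapped to each source element; non-array variables go
// through EmitDecl with VDInit remapped to the original storage instead.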

void CodeGenFunction::EmitOMPPrivateClause(
    const OMPExecutableDirective &D,
    CodeGenFunction::OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return;
  llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
  for (const auto *C : D.getClausesOfKind<OMPPrivateClause>()) {
    auto IRef = C->varlist_begin();
    for (const Expr *IInit : C->private_copies()) {
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
        EmitDecl(*VD);
        // Emit private VarDecl with copy init.
        bool IsRegistered =
            PrivateScope.addPrivate(OrigVD, GetAddrOfLocalVar(VD));
        assert(IsRegistered && "private var already registered as private");
        // Silence the warning about unused variable.
        (void)IsRegistered;
      }
      ++IRef;
    }
  }
}

bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
  if (!HaveInsertPoint())
    return false;
  // threadprivate_var1 = master_threadprivate_var1;
  // operator=(threadprivate_var2, master_threadprivate_var2);
  // ...
  // __kmpc_barrier(&loc, global_tid);
  llvm::DenseSet<const VarDecl *> CopiedVars;
  llvm::BasicBlock *CopyBegin = nullptr, *CopyEnd = nullptr;
  for (const auto *C : D.getClausesOfKind<OMPCopyinClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = VD->getType();
      if (CopiedVars.insert(VD->getCanonicalDecl()).second) {
        // Get the address of the master variable. If we are emitting code with
        // TLS support, the address is passed from the master as a field in the
        // captured declaration.
        Address MasterAddr = Address::invalid();
        if (getLangOpts().OpenMPUseTLS &&
            getContext().getTargetInfo().isTLSSupported()) {
          assert(CapturedStmtInfo->lookup(VD) &&
                 "Copyin threadprivates should have been captured!");
          DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true,
                          (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
          MasterAddr = EmitLValue(&DRE).getAddress();
          LocalDeclMap.erase(VD);
        } else {
          MasterAddr =
              Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
                                          : CGM.GetAddrOfGlobal(VD),
                      CGM.getTypes().ConvertTypeForMem(VD->getType()),
                      getContext().getDeclAlign(VD));
        }
        // Get the address of the threadprivate variable.
        Address PrivateAddr = EmitLValue(*IRef).getAddress();
        if (CopiedVars.size() == 1) {
          // First, check whether the current thread is the master thread. If
          // it is, there is no need to copy the data.
          CopyBegin = createBasicBlock("copyin.not.master");
          CopyEnd = createBasicBlock("copyin.not.master.end");
          // TODO: Avoid ptrtoint conversion.
          auto *MasterAddrInt = Builder.CreatePtrToInt(
              MasterAddr.emitRawPointer(*this), CGM.IntPtrTy);
          auto *PrivateAddrInt = Builder.CreatePtrToInt(
              PrivateAddr.emitRawPointer(*this), CGM.IntPtrTy);
          Builder.CreateCondBr(
              Builder.CreateICmpNE(MasterAddrInt, PrivateAddrInt), CopyBegin,
              CopyEnd);
          EmitBlock(CopyBegin);
        }
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        EmitOMPCopy(Type, PrivateAddr, MasterAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
  }
  if (CopyEnd) {
    // Exit out of copying procedure for non-master thread.
    EmitBlock(CopyEnd, /*IsFinished=*/true);
    return true;
  }
  return false;
}
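
// The guard emitted above looks roughly like (sketch):
//   if ((uintptr_t)&master_var != (uintptr_t)&threadprivate_var) {
//     threadprivate_var = master_var;   // once per 'copyin' variable
//   }
//   // fall through to copyin.not.master.end
// so the master thread, whose threadprivate copy is the master copy, skips
// the copies entirely; the caller emits the trailing barrier when this
// function returns true.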

bool CodeGenFunction::EmitOMPLastprivateClauseInit(
    const OMPExecutableDirective &D, OMPPrivateScope &PrivateScope) {
  if (!HaveInsertPoint())
    return false;
  bool HasAtLeastOneLastprivate = false;
  OpenMPDirectiveKind EKind = getEffectiveDirectiveKind(D);
  llvm::DenseSet<const VarDecl *> SIMDLCVs;
  if (isOpenMPSimdDirective(EKind)) {
    const auto *LoopDirective = cast<OMPLoopDirective>(&D);
    for (const Expr *C : LoopDirective->counters()) {
      SIMDLCVs.insert(
          cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
    }
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    HasAtLeastOneLastprivate = true;
    if (isOpenMPTaskLoopDirective(EKind) && !getLangOpts().OpenMPSimd)
      break;
    const auto *IRef = C->varlist_begin();
    const auto *IDestRef = C->destination_exprs().begin();
    for (const Expr *IInit : C->private_copies()) {
      // Keep the address of the original variable for future update at the
      // end of the loop.
      const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      // Taskloops do not require additional initialization; it is done by the
      // runtime support library.
      if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
                        /*RefersToEnclosingVariableOrCapture=*/
                        CapturedStmtInfo->lookup(OrigVD) != nullptr,
                        (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
        PrivateScope.addPrivate(DestVD, EmitLValue(&DRE).getAddress());
        // Check if the variable is also a firstprivate: in this case IInit is
        // not generated. Initialization of this variable will happen in
        // codegen for the 'firstprivate' clause.
        if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
          const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
          Address VDAddr = Address::invalid();
          if (C->getKind() == OMPC_LASTPRIVATE_conditional) {
            VDAddr = CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
                *this, OrigVD);
            setAddrOfLocalVar(VD, VDAddr);
          } else {
            // Emit private VarDecl with copy init.
            EmitDecl(*VD);
            VDAddr = GetAddrOfLocalVar(VD);
          }
          bool IsRegistered = PrivateScope.addPrivate(OrigVD, VDAddr);
          assert(IsRegistered &&
                 "lastprivate var already registered as private");
          (void)IsRegistered;
        }
      }
      ++IRef;
      ++IDestRef;
    }
  }
  return HasAtLeastOneLastprivate;
}

void CodeGenFunction::EmitOMPLastprivateClauseFinal(
    const OMPExecutableDirective &D, bool NoFinals,
    llvm::Value *IsLastIterCond) {
  if (!HaveInsertPoint())
    return;
  // Emit following code:
  // if (<IsLastIterCond>) {
  //   orig_var1 = private_orig_var1;
  //   ...
  //   orig_varn = private_orig_varn;
  // }
  llvm::BasicBlock *ThenBB = nullptr;
  llvm::BasicBlock *DoneBB = nullptr;
  if (IsLastIterCond) {
    // Emit implicit barrier if at least one lastprivate conditional is found
    // and this is not a simd mode.
    if (!getLangOpts().OpenMPSimd &&
        llvm::any_of(D.getClausesOfKind<OMPLastprivateClause>(),
                     [](const OMPLastprivateClause *C) {
                       return C->getKind() == OMPC_LASTPRIVATE_conditional;
                     })) {
      CGM.getOpenMPRuntime().emitBarrierCall(*this, D.getBeginLoc(),
                                             OMPD_unknown,
                                             /*EmitChecks=*/false,
                                             /*ForceSimpleCall=*/true);
    }
    ThenBB = createBasicBlock(".omp.lastprivate.then");
    DoneBB = createBasicBlock(".omp.lastprivate.done");
    Builder.CreateCondBr(IsLastIterCond, ThenBB, DoneBB);
    EmitBlock(ThenBB);
  }
  llvm::DenseSet<const VarDecl *> AlreadyEmittedVars;
  llvm::DenseMap<const VarDecl *, const Expr *> LoopCountersAndUpdates;
  if (const auto *LoopDirective = dyn_cast<OMPLoopDirective>(&D)) {
    auto IC = LoopDirective->counters().begin();
    for (const Expr *F : LoopDirective->finals()) {
      const auto *D =
          cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl())->getCanonicalDecl();
      if (NoFinals)
        AlreadyEmittedVars.insert(D);
      else
        LoopCountersAndUpdates[D] = F;
      ++IC;
    }
  }
  for (const auto *C : D.getClausesOfKind<OMPLastprivateClause>()) {
    auto IRef = C->varlist_begin();
    auto ISrcRef = C->source_exprs().begin();
    auto IDestRef = C->destination_exprs().begin();
    for (const Expr *AssignOp : C->assignment_ops()) {
      const auto *PrivateVD =
          cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
      QualType Type = PrivateVD->getType();
      const auto *CanonicalVD = PrivateVD->getCanonicalDecl();
      if (AlreadyEmittedVars.insert(CanonicalVD).second) {
        // If the lastprivate variable is a loop control variable for a
        // loop-based directive, update its value before copying it back to
        // the original variable.
        if (const Expr *FinalExpr = LoopCountersAndUpdates.lookup(CanonicalVD))
          EmitIgnoredExpr(FinalExpr);
        const auto *SrcVD =
            cast<VarDecl>(cast<DeclRefExpr>(*ISrcRef)->getDecl());
        const auto *DestVD =
            cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
        // Get the address of the private variable.
        Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
        if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
          PrivateAddr = Address(
              Builder.CreateLoad(PrivateAddr),
              CGM.getTypes().ConvertTypeForMem(RefTy->getPointeeType()),
              CGM.getNaturalTypeAlignment(RefTy->getPointeeType()));
        // Store the last value to the private copy in the last iteration.
        if (C->getKind() == OMPC_LASTPRIVATE_conditional)
          CGM.getOpenMPRuntime().emitLastprivateConditionalFinalUpdate(
              *this, MakeAddrLValue(PrivateAddr, (*IRef)->getType()), PrivateVD,
              (*IRef)->getExprLoc());
        // Get the address of the original variable.
        Address OriginalAddr = GetAddrOfLocalVar(DestVD);
        EmitOMPCopy(Type, OriginalAddr, PrivateAddr, DestVD, SrcVD, AssignOp);
      }
      ++IRef;
      ++ISrcRef;
      ++IDestRef;
    }
    if (const Expr *PostUpdate = C->getPostUpdateExpr())
      EmitIgnoredExpr(PostUpdate);
  }
  if (IsLastIterCond)
    EmitBlock(DoneBB, /*IsFinished=*/true);
}
1253 void CodeGenFunction::EmitOMPReductionClauseInit(
1254 const OMPExecutableDirective &D,
1255 CodeGenFunction::OMPPrivateScope &PrivateScope, bool ForInscan) {
1256 if (!HaveInsertPoint())
1257 return;
1258 SmallVector<const Expr *, 4> Shareds;
1259 SmallVector<const Expr *, 4> Privates;
1260 SmallVector<const Expr *, 4> ReductionOps;
1261 SmallVector<const Expr *, 4> LHSs;
1262 SmallVector<const Expr *, 4> RHSs;
1263 OMPTaskDataTy Data;
1264 SmallVector<const Expr *, 4> TaskLHSs;
1265 SmallVector<const Expr *, 4> TaskRHSs;
1266 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1267 if (ForInscan != (C->getModifier() == OMPC_REDUCTION_inscan))
1268 continue;
1269 Shareds.append(C->varlist_begin(), C->varlist_end());
1270 Privates.append(C->privates().begin(), C->privates().end());
1271 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
1272 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
1273 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
1274 if (C->getModifier() == OMPC_REDUCTION_task) {
1275 Data.ReductionVars.append(C->privates().begin(), C->privates().end());
1276 Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
1277 Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
1278 Data.ReductionOps.append(C->reduction_ops().begin(),
1279 C->reduction_ops().end());
1280 TaskLHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
1281 TaskRHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
1284 ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
1285 unsigned Count = 0;
1286 auto *ILHS = LHSs.begin();
1287 auto *IRHS = RHSs.begin();
1288 auto *IPriv = Privates.begin();
1289 for (const Expr *IRef : Shareds) {
1290 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*IPriv)->getDecl());
1291 // Emit private VarDecl with reduction init.
1292 RedCG.emitSharedOrigLValue(*this, Count);
1293 RedCG.emitAggregateType(*this, Count);
1294 AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
1295 RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
1296 RedCG.getSharedLValue(Count).getAddress(),
1297 [&Emission](CodeGenFunction &CGF) {
1298 CGF.EmitAutoVarInit(Emission);
1299 return true;
1301 EmitAutoVarCleanups(Emission);
1302 Address BaseAddr = RedCG.adjustPrivateAddress(
1303 *this, Count, Emission.getAllocatedAddress());
1304 bool IsRegistered =
1305 PrivateScope.addPrivate(RedCG.getBaseDecl(Count), BaseAddr);
1306 assert(IsRegistered && "private var already registered as private");
1307 // Silence the warning about unused variable.
1308 (void)IsRegistered;
1310 const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
1311 const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
1312 QualType Type = PrivateVD->getType();
1313 bool isaOMPArraySectionExpr = isa<ArraySectionExpr>(IRef);
1314 if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
1315 // Store the address of the original variable associated with the LHS
1316 // implicit variable.
1317 PrivateScope.addPrivate(LHSVD, RedCG.getSharedLValue(Count).getAddress());
1318 PrivateScope.addPrivate(RHSVD, GetAddrOfLocalVar(PrivateVD));
1319 } else if ((isaOMPArraySectionExpr && Type->isScalarType()) ||
1320 isa<ArraySubscriptExpr>(IRef)) {
1321 // Store the address of the original variable associated with the LHS
1322 // implicit variable.
1323 PrivateScope.addPrivate(LHSVD, RedCG.getSharedLValue(Count).getAddress());
1324 PrivateScope.addPrivate(RHSVD,
1325 GetAddrOfLocalVar(PrivateVD).withElementType(
1326 ConvertTypeForMem(RHSVD->getType())));
1327 } else {
1328 QualType Type = PrivateVD->getType();
1329 bool IsArray = getContext().getAsArrayType(Type) != nullptr;
1330 Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress();
1331 // Store the address of the original variable associated with the LHS
1332 // implicit variable.
1333 if (IsArray) {
1334 OriginalAddr =
1335 OriginalAddr.withElementType(ConvertTypeForMem(LHSVD->getType()));
1337 PrivateScope.addPrivate(LHSVD, OriginalAddr);
1338 PrivateScope.addPrivate(
1339 RHSVD, IsArray ? GetAddrOfLocalVar(PrivateVD).withElementType(
1340 ConvertTypeForMem(RHSVD->getType()))
1341 : GetAddrOfLocalVar(PrivateVD));
1343 ++ILHS;
1344 ++IRHS;
1345 ++IPriv;
1346 ++Count;
1348 if (!Data.ReductionVars.empty()) {
1349 OpenMPDirectiveKind EKind = getEffectiveDirectiveKind(D);
1350 Data.IsReductionWithTaskMod = true;
1351 Data.IsWorksharingReduction = isOpenMPWorksharingDirective(EKind);
1352 llvm::Value *ReductionDesc = CGM.getOpenMPRuntime().emitTaskReductionInit(
1353 *this, D.getBeginLoc(), TaskLHSs, TaskRHSs, Data);
1354 const Expr *TaskRedRef = nullptr;
1355 switch (EKind) {
1356 case OMPD_parallel:
1357 TaskRedRef = cast<OMPParallelDirective>(D).getTaskReductionRefExpr();
1358 break;
1359 case OMPD_for:
1360 TaskRedRef = cast<OMPForDirective>(D).getTaskReductionRefExpr();
1361 break;
1362 case OMPD_sections:
1363 TaskRedRef = cast<OMPSectionsDirective>(D).getTaskReductionRefExpr();
1364 break;
1365 case OMPD_parallel_for:
1366 TaskRedRef = cast<OMPParallelForDirective>(D).getTaskReductionRefExpr();
1367 break;
1368 case OMPD_parallel_master:
1369 TaskRedRef =
1370 cast<OMPParallelMasterDirective>(D).getTaskReductionRefExpr();
1371 break;
1372 case OMPD_parallel_sections:
1373 TaskRedRef =
1374 cast<OMPParallelSectionsDirective>(D).getTaskReductionRefExpr();
1375 break;
1376 case OMPD_target_parallel:
1377 TaskRedRef =
1378 cast<OMPTargetParallelDirective>(D).getTaskReductionRefExpr();
1379 break;
1380 case OMPD_target_parallel_for:
1381 TaskRedRef =
1382 cast<OMPTargetParallelForDirective>(D).getTaskReductionRefExpr();
1383 break;
1384 case OMPD_distribute_parallel_for:
1385 TaskRedRef =
1386 cast<OMPDistributeParallelForDirective>(D).getTaskReductionRefExpr();
1387 break;
1388 case OMPD_teams_distribute_parallel_for:
1389 TaskRedRef = cast<OMPTeamsDistributeParallelForDirective>(D)
1390 .getTaskReductionRefExpr();
1391 break;
1392 case OMPD_target_teams_distribute_parallel_for:
1393 TaskRedRef = cast<OMPTargetTeamsDistributeParallelForDirective>(D)
1394 .getTaskReductionRefExpr();
1395 break;
1396 case OMPD_simd:
1397 case OMPD_for_simd:
1398 case OMPD_section:
1399 case OMPD_single:
1400 case OMPD_master:
1401 case OMPD_critical:
1402 case OMPD_parallel_for_simd:
1403 case OMPD_task:
1404 case OMPD_taskyield:
1405 case OMPD_error:
1406 case OMPD_barrier:
1407 case OMPD_taskwait:
1408 case OMPD_taskgroup:
1409 case OMPD_flush:
1410 case OMPD_depobj:
1411 case OMPD_scan:
1412 case OMPD_ordered:
1413 case OMPD_atomic:
1414 case OMPD_teams:
1415 case OMPD_target:
1416 case OMPD_cancellation_point:
1417 case OMPD_cancel:
1418 case OMPD_target_data:
1419 case OMPD_target_enter_data:
1420 case OMPD_target_exit_data:
1421 case OMPD_taskloop:
1422 case OMPD_taskloop_simd:
1423 case OMPD_master_taskloop:
1424 case OMPD_master_taskloop_simd:
1425 case OMPD_parallel_master_taskloop:
1426 case OMPD_parallel_master_taskloop_simd:
1427 case OMPD_distribute:
1428 case OMPD_target_update:
1429 case OMPD_distribute_parallel_for_simd:
1430 case OMPD_distribute_simd:
1431 case OMPD_target_parallel_for_simd:
1432 case OMPD_target_simd:
1433 case OMPD_teams_distribute:
1434 case OMPD_teams_distribute_simd:
1435 case OMPD_teams_distribute_parallel_for_simd:
1436 case OMPD_target_teams:
1437 case OMPD_target_teams_distribute:
1438 case OMPD_target_teams_distribute_parallel_for_simd:
1439 case OMPD_target_teams_distribute_simd:
1440 case OMPD_declare_target:
1441 case OMPD_end_declare_target:
1442 case OMPD_threadprivate:
1443 case OMPD_allocate:
1444 case OMPD_declare_reduction:
1445 case OMPD_declare_mapper:
1446 case OMPD_declare_simd:
1447 case OMPD_requires:
1448 case OMPD_declare_variant:
1449 case OMPD_begin_declare_variant:
1450 case OMPD_end_declare_variant:
1451 case OMPD_unknown:
1452 default:
1453 llvm_unreachable("Unexpected directive with task reductions.");
1456 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(TaskRedRef)->getDecl());
1457 EmitVarDecl(*VD);
1458 EmitStoreOfScalar(ReductionDesc, GetAddrOfLocalVar(VD),
1459 /*Volatile=*/false, TaskRedRef->getType());
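// Illustrative example (not from the source): the code above runs for a
// reduction with the 'task' modifier, e.g.
//
//   #pragma omp parallel reduction(task, +: sum)
//   { sum += work(); }
//
// The returned task-reduction descriptor is stored into the implicit
// task-reduction reference variable so that child tasks can look it up.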
1463 void CodeGenFunction::EmitOMPReductionClauseFinal(
1464 const OMPExecutableDirective &D, const OpenMPDirectiveKind ReductionKind) {
1465 if (!HaveInsertPoint())
1466 return;
1467 llvm::SmallVector<const Expr *, 8> Privates;
1468 llvm::SmallVector<const Expr *, 8> LHSExprs;
1469 llvm::SmallVector<const Expr *, 8> RHSExprs;
1470 llvm::SmallVector<const Expr *, 8> ReductionOps;
1471 bool HasAtLeastOneReduction = false;
1472 bool IsReductionWithTaskMod = false;
1473 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1474 // Do not emit for inscan reductions.
1475 if (C->getModifier() == OMPC_REDUCTION_inscan)
1476 continue;
1477 HasAtLeastOneReduction = true;
1478 Privates.append(C->privates().begin(), C->privates().end());
1479 LHSExprs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
1480 RHSExprs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
1481 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
1482 IsReductionWithTaskMod =
1483 IsReductionWithTaskMod || C->getModifier() == OMPC_REDUCTION_task;
1485 if (HasAtLeastOneReduction) {
1486 OpenMPDirectiveKind EKind = getEffectiveDirectiveKind(D);
1487 if (IsReductionWithTaskMod) {
1488 CGM.getOpenMPRuntime().emitTaskReductionFini(
1489 *this, D.getBeginLoc(), isOpenMPWorksharingDirective(EKind));
1491 bool TeamsLoopCanBeParallel = false;
1492 if (auto *TTLD = dyn_cast<OMPTargetTeamsGenericLoopDirective>(&D))
1493 TeamsLoopCanBeParallel = TTLD->canBeParallelFor();
1494 bool WithNowait = D.getSingleClause<OMPNowaitClause>() ||
1495 isOpenMPParallelDirective(EKind) ||
1496 TeamsLoopCanBeParallel || ReductionKind == OMPD_simd;
1497 bool SimpleReduction = ReductionKind == OMPD_simd;
1498 // Emit a nowait reduction if the nowait clause is present or the directive
1499 // is a parallel directive (which always has an implicit barrier).
1500 CGM.getOpenMPRuntime().emitReduction(
1501 *this, D.getEndLoc(), Privates, LHSExprs, RHSExprs, ReductionOps,
1502 {WithNowait, SimpleReduction, ReductionKind});
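// A minimal sketch of when the nowait form is chosen (assumed example): for
//
//   #pragma omp for reduction(+: x) nowait
//
// WithNowait is true because of the explicit nowait clause; for
// '#pragma omp parallel for reduction(+: x)' it is also true because the
// parallel region's implicit barrier already synchronizes the threads.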
1506 static void emitPostUpdateForReductionClause(
1507 CodeGenFunction &CGF, const OMPExecutableDirective &D,
1508 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
1509 if (!CGF.HaveInsertPoint())
1510 return;
1511 llvm::BasicBlock *DoneBB = nullptr;
1512 for (const auto *C : D.getClausesOfKind<OMPReductionClause>()) {
1513 if (const Expr *PostUpdate = C->getPostUpdateExpr()) {
1514 if (!DoneBB) {
1515 if (llvm::Value *Cond = CondGen(CGF)) {
1516 // If the first post-update expression is found, emit conditional
1517 // block if it was requested.
1518 llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".omp.reduction.pu");
1519 DoneBB = CGF.createBasicBlock(".omp.reduction.pu.done");
1520 CGF.Builder.CreateCondBr(Cond, ThenBB, DoneBB);
1521 CGF.EmitBlock(ThenBB);
1524 CGF.EmitIgnoredExpr(PostUpdate);
1527 if (DoneBB)
1528 CGF.EmitBlock(DoneBB, /*IsFinished=*/true);
1531 namespace {
1532 /// Codegen lambda for appending distribute lower and upper bounds to outlined
1533 /// parallel function. This is necessary for combined constructs such as
1534 /// 'distribute parallel for'.
1535 typedef llvm::function_ref<void(CodeGenFunction &,
1536 const OMPExecutableDirective &,
1537 llvm::SmallVectorImpl<llvm::Value *> &)>
1538 CodeGenBoundParametersTy;
1539 } // anonymous namespace
1541 static void
1542 checkForLastprivateConditionalUpdate(CodeGenFunction &CGF,
1543 const OMPExecutableDirective &S) {
1544 if (CGF.getLangOpts().OpenMP < 50)
1545 return;
1546 llvm::DenseSet<CanonicalDeclPtr<const VarDecl>> PrivateDecls;
1547 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
1548 for (const Expr *Ref : C->varlist()) {
1549 if (!Ref->getType()->isScalarType())
1550 continue;
1551 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1552 if (!DRE)
1553 continue;
1554 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1555 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
1558 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
1559 for (const Expr *Ref : C->varlist()) {
1560 if (!Ref->getType()->isScalarType())
1561 continue;
1562 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1563 if (!DRE)
1564 continue;
1565 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1566 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
1569 for (const auto *C : S.getClausesOfKind<OMPLinearClause>()) {
1570 for (const Expr *Ref : C->varlist()) {
1571 if (!Ref->getType()->isScalarType())
1572 continue;
1573 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1574 if (!DRE)
1575 continue;
1576 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1577 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, Ref);
1580 // Privates should not be analyzed since they are not captured at all.
1581 // Task reductions may be skipped - tasks are ignored.
1582 // Firstprivates do not return a value but may be passed by reference - no
1583 // need to check for an updated lastprivate conditional.
1584 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
1585 for (const Expr *Ref : C->varlist()) {
1586 if (!Ref->getType()->isScalarType())
1587 continue;
1588 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
1589 if (!DRE)
1590 continue;
1591 PrivateDecls.insert(cast<VarDecl>(DRE->getDecl()));
1594 CGF.CGM.getOpenMPRuntime().checkAndEmitSharedLastprivateConditional(
1595 CGF, S, PrivateDecls);
1598 static void emitCommonOMPParallelDirective(
1599 CodeGenFunction &CGF, const OMPExecutableDirective &S,
1600 OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
1601 const CodeGenBoundParametersTy &CodeGenBoundParameters) {
1602 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
1603 llvm::Value *NumThreads = nullptr;
1604 llvm::Function *OutlinedFn =
1605 CGF.CGM.getOpenMPRuntime().emitParallelOutlinedFunction(
1606 CGF, S, *CS->getCapturedDecl()->param_begin(), InnermostKind,
1607 CodeGen);
1608 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>()) {
1609 CodeGenFunction::RunCleanupsScope NumThreadsScope(CGF);
1610 NumThreads = CGF.EmitScalarExpr(NumThreadsClause->getNumThreads(),
1611 /*IgnoreResultAssign=*/true);
1612 CGF.CGM.getOpenMPRuntime().emitNumThreadsClause(
1613 CGF, NumThreads, NumThreadsClause->getBeginLoc());
1615 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>()) {
1616 CodeGenFunction::RunCleanupsScope ProcBindScope(CGF);
1617 CGF.CGM.getOpenMPRuntime().emitProcBindClause(
1618 CGF, ProcBindClause->getProcBindKind(), ProcBindClause->getBeginLoc());
1620 const Expr *IfCond = nullptr;
1621 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
1622 if (C->getNameModifier() == OMPD_unknown ||
1623 C->getNameModifier() == OMPD_parallel) {
1624 IfCond = C->getCondition();
1625 break;
1629 OMPParallelScope Scope(CGF, S);
1630 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
1631 // Combining 'distribute' with 'for' requires sharing each 'distribute' chunk
1632 // lower and upper bounds with the pragma 'for' chunking mechanism.
1633 // The following lambda takes care of appending the lower and upper bound
1634 // parameters when necessary.
1635 CodeGenBoundParameters(CGF, S, CapturedVars);
1636 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
1637 CGF.CGM.getOpenMPRuntime().emitParallelCall(CGF, S.getBeginLoc(), OutlinedFn,
1638 CapturedVars, IfCond, NumThreads);
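// For illustration (assumed source, not from this file): a directive such as
//
//   #pragma omp parallel num_threads(4) proc_bind(close) if(parallel: n > 1)
//
// exercises all three paths above: num_threads and proc_bind are emitted as
// runtime calls before the fork, and the if condition is passed to
// emitParallelCall to select between forked and serialized execution.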
1641 static bool isAllocatableDecl(const VarDecl *VD) {
1642 const VarDecl *CVD = VD->getCanonicalDecl();
1643 if (!CVD->hasAttr<OMPAllocateDeclAttr>())
1644 return false;
1645 const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
1646 // Use the default allocation.
1647 return !((AA->getAllocatorType() == OMPAllocateDeclAttr::OMPDefaultMemAlloc ||
1648 AA->getAllocatorType() == OMPAllocateDeclAttr::OMPNullMemAlloc) &&
1649 !AA->getAllocator());
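// e.g. (illustrative): for
//
//   int v;
//   #pragma omp allocate(v) allocator(omp_high_bw_mem_alloc)
//
// isAllocatableDecl(v) returns true; with the default (or null) allocator and
// no allocator expression it returns false and 'v' gets an ordinary alloca.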
1652 static void emitEmptyBoundParameters(CodeGenFunction &,
1653 const OMPExecutableDirective &,
1654 llvm::SmallVectorImpl<llvm::Value *> &) {}
1656 static void emitOMPCopyinClause(CodeGenFunction &CGF,
1657 const OMPExecutableDirective &S) {
1658 bool Copyins = CGF.EmitOMPCopyinClause(S);
1659 if (Copyins) {
1660 // Emit an implicit barrier to synchronize threads and avoid data races on
1661 // propagation of the master thread's values of threadprivate variables to
1662 // the local instances of those variables in all other implicit threads.
1663 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
1664 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
1665 /*ForceSimpleCall=*/true);
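// Illustrative use of copyin (assumed example):
//
//   int t;
//   #pragma omp threadprivate(t)
//   ...
//   t = 42;                        // master thread's value
//   #pragma omp parallel copyin(t) // every thread starts with t == 42
//
// The barrier emitted above guarantees no thread reads its copy of 't'
// before the master's value has been propagated.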
1669 Address CodeGenFunction::OMPBuilderCBHelpers::getAddressOfLocalVariable(
1670 CodeGenFunction &CGF, const VarDecl *VD) {
1671 CodeGenModule &CGM = CGF.CGM;
1672 auto &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
1674 if (!VD)
1675 return Address::invalid();
1676 const VarDecl *CVD = VD->getCanonicalDecl();
1677 if (!isAllocatableDecl(CVD))
1678 return Address::invalid();
1679 llvm::Value *Size;
1680 CharUnits Align = CGM.getContext().getDeclAlign(CVD);
1681 if (CVD->getType()->isVariablyModifiedType()) {
1682 Size = CGF.getTypeSize(CVD->getType());
1683 // Align the size: ((size + align - 1) / align) * align
1684 Size = CGF.Builder.CreateNUWAdd(
1685 Size, CGM.getSize(Align - CharUnits::fromQuantity(1)));
1686 Size = CGF.Builder.CreateUDiv(Size, CGM.getSize(Align));
1687 Size = CGF.Builder.CreateNUWMul(Size, CGM.getSize(Align));
1688 } else {
1689 CharUnits Sz = CGM.getContext().getTypeSizeInChars(CVD->getType());
1690 Size = CGM.getSize(Sz.alignTo(Align));
1693 const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
1694 assert(AA->getAllocator() &&
1695 "Expected allocator expression for non-default allocator.");
1696 llvm::Value *Allocator = CGF.EmitScalarExpr(AA->getAllocator());
1697 // According to the standard, the original allocator type is an enum (integer).
1698 // Convert to pointer type, if required.
1699 if (Allocator->getType()->isIntegerTy())
1700 Allocator = CGF.Builder.CreateIntToPtr(Allocator, CGM.VoidPtrTy);
1701 else if (Allocator->getType()->isPointerTy())
1702 Allocator = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Allocator,
1703 CGM.VoidPtrTy);
1705 llvm::Value *Addr = OMPBuilder.createOMPAlloc(
1706 CGF.Builder, Size, Allocator,
1707 getNameWithSeparators({CVD->getName(), ".void.addr"}, ".", "."));
1708 llvm::CallInst *FreeCI =
1709 OMPBuilder.createOMPFree(CGF.Builder, Addr, Allocator);
1711 CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(NormalAndEHCleanup, FreeCI);
1712 Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
1713 Addr,
1714 CGF.ConvertTypeForMem(CGM.getContext().getPointerType(CVD->getType())),
1715 getNameWithSeparators({CVD->getName(), ".addr"}, ".", "."));
1716 return Address(Addr, CGF.ConvertTypeForMem(CVD->getType()), Align);
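// Rough shape of the emitted code (a sketch; the calls are produced by the
// OMPIRBuilder's createOMPAlloc/createOMPFree above):
//
//   %size = ...                        ; aligned up for VLAs as computed above
//   %p = call ptr @__kmpc_alloc(i32 %gtid, i64 %size, ptr %allocator)
//   ...                                ; the variable lives at %p
//   call void @__kmpc_free(i32 %gtid, ptr %p, ptr %allocator) ; via cleanup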
1719 Address CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
1720 CodeGenFunction &CGF, const VarDecl *VD, Address VDAddr,
1721 SourceLocation Loc) {
1722 CodeGenModule &CGM = CGF.CGM;
1723 if (CGM.getLangOpts().OpenMPUseTLS &&
1724 CGM.getContext().getTargetInfo().isTLSSupported())
1725 return VDAddr;
1727 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
1729 llvm::Type *VarTy = VDAddr.getElementType();
1730 llvm::Value *Data =
1731 CGF.Builder.CreatePointerCast(VDAddr.emitRawPointer(CGF), CGM.Int8PtrTy);
1732 llvm::ConstantInt *Size = CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy));
1733 std::string Suffix = getNameWithSeparators({"cache", ""});
1734 llvm::Twine CacheName = Twine(CGM.getMangledName(VD)).concat(Suffix);
1736 llvm::CallInst *ThreadPrivateCacheCall =
1737 OMPBuilder.createCachedThreadPrivate(CGF.Builder, Data, Size, CacheName);
1739 return Address(ThreadPrivateCacheCall, CGM.Int8Ty, VDAddr.getAlignment());
1742 std::string CodeGenFunction::OMPBuilderCBHelpers::getNameWithSeparators(
1743 ArrayRef<StringRef> Parts, StringRef FirstSeparator, StringRef Separator) {
1744 SmallString<128> Buffer;
1745 llvm::raw_svector_ostream OS(Buffer);
1746 StringRef Sep = FirstSeparator;
1747 for (StringRef Part : Parts) {
1748 OS << Sep << Part;
1749 Sep = Separator;
1751 return OS.str().str();
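// Usage sketch: getNameWithSeparators({"x", "cache"}, "$", ".") yields
// "$x.cache" - the first separator prefixes the first part, the regular
// separator joins the rest.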
1754 void CodeGenFunction::OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
1755 CodeGenFunction &CGF, const Stmt *RegionBodyStmt, InsertPointTy AllocaIP,
1756 InsertPointTy CodeGenIP, Twine RegionName) {
1757 CGBuilderTy &Builder = CGF.Builder;
1758 Builder.restoreIP(CodeGenIP);
1759 llvm::BasicBlock *FiniBB = splitBBWithSuffix(Builder, /*CreateBranch=*/false,
1760 "." + RegionName + ".after");
1763 OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(CGF, AllocaIP, *FiniBB);
1764 CGF.EmitStmt(RegionBodyStmt);
1767 if (Builder.saveIP().isSet())
1768 Builder.CreateBr(FiniBB);
1771 void CodeGenFunction::OMPBuilderCBHelpers::EmitOMPOutlinedRegionBody(
1772 CodeGenFunction &CGF, const Stmt *RegionBodyStmt, InsertPointTy AllocaIP,
1773 InsertPointTy CodeGenIP, Twine RegionName) {
1774 CGBuilderTy &Builder = CGF.Builder;
1775 Builder.restoreIP(CodeGenIP);
1776 llvm::BasicBlock *FiniBB = splitBBWithSuffix(Builder, /*CreateBranch=*/false,
1777 "." + RegionName + ".after");
1780 OMPBuilderCBHelpers::OutlinedRegionBodyRAII IRB(CGF, AllocaIP, *FiniBB);
1781 CGF.EmitStmt(RegionBodyStmt);
1784 if (Builder.saveIP().isSet())
1785 Builder.CreateBr(FiniBB);
1788 void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
1789 if (CGM.getLangOpts().OpenMPIRBuilder) {
1790 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
1791 // Check if we have any if clause associated with the directive.
1792 llvm::Value *IfCond = nullptr;
1793 if (const auto *C = S.getSingleClause<OMPIfClause>())
1794 IfCond = EmitScalarExpr(C->getCondition(),
1795 /*IgnoreResultAssign=*/true);
1797 llvm::Value *NumThreads = nullptr;
1798 if (const auto *NumThreadsClause = S.getSingleClause<OMPNumThreadsClause>())
1799 NumThreads = EmitScalarExpr(NumThreadsClause->getNumThreads(),
1800 /*IgnoreResultAssign=*/true);
1802 ProcBindKind ProcBind = OMP_PROC_BIND_default;
1803 if (const auto *ProcBindClause = S.getSingleClause<OMPProcBindClause>())
1804 ProcBind = ProcBindClause->getProcBindKind();
1806 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
1808 // The cleanup callback that finalizes all variables at the given location
1809 // and thus calls destructors, etc.
1810 auto FiniCB = [this](InsertPointTy IP) {
1811 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
1812 return llvm::Error::success();
1815 // Privatization callback that performs appropriate action for
1816 // shared/private/firstprivate/lastprivate/copyin/... variables.
1818 // TODO: This defaults to shared right now.
1819 auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
1820 llvm::Value &, llvm::Value &Val, llvm::Value *&ReplVal) {
1821 // The next line is appropriate only for variables (Val) with the
1822 // data-sharing attribute "shared".
1823 ReplVal = &Val;
1825 return CodeGenIP;
1828 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
1829 const Stmt *ParallelRegionBodyStmt = CS->getCapturedStmt();
1831 auto BodyGenCB = [&, this](InsertPointTy AllocaIP,
1832 InsertPointTy CodeGenIP) {
1833 OMPBuilderCBHelpers::EmitOMPOutlinedRegionBody(
1834 *this, ParallelRegionBodyStmt, AllocaIP, CodeGenIP, "parallel");
1835 return llvm::Error::success();
1838 CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
1839 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
1840 llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
1841 AllocaInsertPt->getParent(), AllocaInsertPt->getIterator());
1842 llvm::OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
1843 OMPBuilder.createParallel(Builder, AllocaIP, BodyGenCB, PrivCB, FiniCB,
1844 IfCond, NumThreads, ProcBind, S.hasCancel());
1845 assert(AfterIP && "unexpected error creating parallel");
1846 Builder.restoreIP(*AfterIP);
1847 return;
1850 // Emit parallel region as a standalone region.
1851 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
1852 Action.Enter(CGF);
1853 OMPPrivateScope PrivateScope(CGF);
1854 emitOMPCopyinClause(CGF, S);
1855 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
1856 CGF.EmitOMPPrivateClause(S, PrivateScope);
1857 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
1858 (void)PrivateScope.Privatize();
1859 CGF.EmitStmt(S.getCapturedStmt(OMPD_parallel)->getCapturedStmt());
1860 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
1863 auto LPCRegion =
1864 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
1865 emitCommonOMPParallelDirective(*this, S, OMPD_parallel, CodeGen,
1866 emitEmptyBoundParameters);
1867 emitPostUpdateForReductionClause(*this, S,
1868 [](CodeGenFunction &) { return nullptr; });
1870 // Check for outer lastprivate conditional update.
1871 checkForLastprivateConditionalUpdate(*this, S);
1874 void CodeGenFunction::EmitOMPMetaDirective(const OMPMetaDirective &S) {
1875 EmitStmt(S.getIfStmt());
1878 namespace {
1879 /// RAII to handle scopes for loop transformation directives.
1880 class OMPTransformDirectiveScopeRAII {
1881 OMPLoopScope *Scope = nullptr;
1882 CodeGenFunction::CGCapturedStmtInfo *CGSI = nullptr;
1883 CodeGenFunction::CGCapturedStmtRAII *CapInfoRAII = nullptr;
1885 OMPTransformDirectiveScopeRAII(const OMPTransformDirectiveScopeRAII &) =
1886 delete;
1887 OMPTransformDirectiveScopeRAII &
1888 operator=(const OMPTransformDirectiveScopeRAII &) = delete;
1890 public:
1891 OMPTransformDirectiveScopeRAII(CodeGenFunction &CGF, const Stmt *S) {
1892 if (const auto *Dir = dyn_cast<OMPLoopBasedDirective>(S)) {
1893 Scope = new OMPLoopScope(CGF, *Dir);
1894 CGSI = new CodeGenFunction::CGCapturedStmtInfo(CR_OpenMP);
1895 CapInfoRAII = new CodeGenFunction::CGCapturedStmtRAII(CGF, CGSI);
1898 ~OMPTransformDirectiveScopeRAII() {
1899 if (!Scope)
1900 return;
1901 delete CapInfoRAII;
1902 delete CGSI;
1903 delete Scope;
1906 } // namespace
1908 static void emitBody(CodeGenFunction &CGF, const Stmt *S, const Stmt *NextLoop,
1909 int MaxLevel, int Level = 0) {
1910 assert(Level < MaxLevel && "Too deep lookup during loop body codegen.");
1911 const Stmt *SimplifiedS = S->IgnoreContainers();
1912 if (const auto *CS = dyn_cast<CompoundStmt>(SimplifiedS)) {
1913 PrettyStackTraceLoc CrashInfo(
1914 CGF.getContext().getSourceManager(), CS->getLBracLoc(),
1915 "LLVM IR generation of compound statement ('{}')");
1917 // Keep track of the current cleanup stack depth, including debug scopes.
1918 CodeGenFunction::LexicalScope Scope(CGF, S->getSourceRange());
1919 for (const Stmt *CurStmt : CS->body())
1920 emitBody(CGF, CurStmt, NextLoop, MaxLevel, Level);
1921 return;
1923 if (SimplifiedS == NextLoop) {
1924 if (auto *Dir = dyn_cast<OMPLoopTransformationDirective>(SimplifiedS))
1925 SimplifiedS = Dir->getTransformedStmt();
1926 if (const auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(SimplifiedS))
1927 SimplifiedS = CanonLoop->getLoopStmt();
1928 if (const auto *For = dyn_cast<ForStmt>(SimplifiedS)) {
1929 S = For->getBody();
1930 } else {
1931 assert(isa<CXXForRangeStmt>(SimplifiedS) &&
1932 "Expected canonical for loop or range-based for loop.");
1933 const auto *CXXFor = cast<CXXForRangeStmt>(SimplifiedS);
1934 CGF.EmitStmt(CXXFor->getLoopVarStmt());
1935 S = CXXFor->getBody();
1937 if (Level + 1 < MaxLevel) {
1938 NextLoop = OMPLoopDirective::tryToFindNextInnerLoop(
1939 S, /*TryImperfectlyNestedLoops=*/true);
1940 emitBody(CGF, S, NextLoop, MaxLevel, Level + 1);
1941 return;
1944 CGF.EmitStmt(S);
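// For example (illustrative), with '#pragma omp for collapse(2)' over
//
//   for (int i = 0; i < n; ++i) {
//     S1;                       // imperfectly nested statement
//     for (int j = 0; j < m; ++j)
//       BODY;
//   }
//
// emitBody descends from the 'i' loop to the 'j' loop (MaxLevel == 2),
// emitting S1 on the way and finally the innermost BODY.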
1947 void CodeGenFunction::EmitOMPLoopBody(const OMPLoopDirective &D,
1948 JumpDest LoopExit) {
1949 RunCleanupsScope BodyScope(*this);
1950 // Update counter values on the current iteration.
1951 for (const Expr *UE : D.updates())
1952 EmitIgnoredExpr(UE);
1953 // Update the linear variables.
1954 // In distribute directives only loop counters may be marked as linear; no
1955 // need to generate code for them.
1956 OpenMPDirectiveKind EKind = getEffectiveDirectiveKind(D);
1957 if (!isOpenMPDistributeDirective(EKind)) {
1958 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
1959 for (const Expr *UE : C->updates())
1960 EmitIgnoredExpr(UE);
1964 // On a continue in the body, jump to the end.
1965 JumpDest Continue = getJumpDestInCurrentScope("omp.body.continue");
1966 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1967 for (const Expr *E : D.finals_conditions()) {
1968 if (!E)
1969 continue;
1970 // Check that loop counter in non-rectangular nest fits into the iteration
1971 // space.
1972 llvm::BasicBlock *NextBB = createBasicBlock("omp.body.next");
1973 EmitBranchOnBoolExpr(E, NextBB, Continue.getBlock(),
1974 getProfileCount(D.getBody()));
1975 EmitBlock(NextBB);
1978 OMPPrivateScope InscanScope(*this);
1979 EmitOMPReductionClauseInit(D, InscanScope, /*ForInscan=*/true);
1980 bool IsInscanRegion = InscanScope.Privatize();
1981 if (IsInscanRegion) {
1982 // Need to remember the block before and after scan directive
1983 // to dispatch them correctly depending on the clause used in
1984 // this directive, inclusive or exclusive. For inclusive scan the natural
1985 // order of the blocks is used, for exclusive clause the blocks must be
1986 // executed in reverse order.
1987 OMPBeforeScanBlock = createBasicBlock("omp.before.scan.bb");
1988 OMPAfterScanBlock = createBasicBlock("omp.after.scan.bb");
1989 // No need to allocate the inscan exit block; in simd mode it is selected in
1990 // the codegen for the scan directive.
1991 if (EKind != OMPD_simd && !getLangOpts().OpenMPSimd)
1992 OMPScanExitBlock = createBasicBlock("omp.exit.inscan.bb");
1993 OMPScanDispatch = createBasicBlock("omp.inscan.dispatch");
1994 EmitBranch(OMPScanDispatch);
1995 EmitBlock(OMPBeforeScanBlock);
1998 // Emit loop variables for C++ range loops.
1999 const Stmt *Body =
2000 D.getInnermostCapturedStmt()->getCapturedStmt()->IgnoreContainers();
2001 // Emit loop body.
2002 emitBody(*this, Body,
2003 OMPLoopBasedDirective::tryToFindNextInnerLoop(
2004 Body, /*TryImperfectlyNestedLoops=*/true),
2005 D.getLoopsNumber());
2007 // Jump to the dispatcher at the end of the loop body.
2008 if (IsInscanRegion)
2009 EmitBranch(OMPScanExitBlock);
2011 // The end (updates/cleanups).
2012 EmitBlock(Continue.getBlock());
2013 BreakContinueStack.pop_back();
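// A sketch of what D.updates() computes: for a canonical loop
//
//   for (int i = lb; i < ub; i += step) ...
//
// the update expression is roughly 'i = lb + IV * step', mapping the logical
// iteration variable IV back onto the user's loop counter.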
2016 using EmittedClosureTy = std::pair<llvm::Function *, llvm::Value *>;
2018 /// Emit a captured statement and return the function as well as its captured
2019 /// closure context.
2020 static EmittedClosureTy emitCapturedStmtFunc(CodeGenFunction &ParentCGF,
2021 const CapturedStmt *S) {
2022 LValue CapStruct = ParentCGF.InitCapturedStruct(*S);
2023 CodeGenFunction CGF(ParentCGF.CGM, /*suppressNewContext=*/true);
2024 std::unique_ptr<CodeGenFunction::CGCapturedStmtInfo> CSI =
2025 std::make_unique<CodeGenFunction::CGCapturedStmtInfo>(*S);
2026 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, CSI.get());
2027 llvm::Function *F = CGF.GenerateCapturedStmtFunction(*S);
2029 return {F, CapStruct.getPointer(ParentCGF)};
2032 /// Emit a call to a previously captured closure.
2033 static llvm::CallInst *
2034 emitCapturedStmtCall(CodeGenFunction &ParentCGF, EmittedClosureTy Cap,
2035 llvm::ArrayRef<llvm::Value *> Args) {
2036 // Append the closure context to the argument list.
2037 SmallVector<llvm::Value *> EffectiveArgs;
2038 EffectiveArgs.reserve(Args.size() + 1);
2039 llvm::append_range(EffectiveArgs, Args);
2040 EffectiveArgs.push_back(Cap.second);
2042 return ParentCGF.Builder.CreateCall(Cap.first, EffectiveArgs);
2045 llvm::CanonicalLoopInfo *
2046 CodeGenFunction::EmitOMPCollapsedCanonicalLoopNest(const Stmt *S, int Depth) {
2047 assert(Depth == 1 && "Nested loops with OpenMPIRBuilder not yet implemented");
2049 // The caller is processing a loop-associated directive that covers the \p
2050 // Depth loops nested in \p S. Save the previous pending loop-associated
2051 // directive on the stack. If the current loop-associated directive is a loop
2052 // transformation directive, it will push its generated loops onto the stack
2053 // so that, together with the loops left here, they form the combined loop
2054 // nest for the parent loop-associated directive.
2055 int ParentExpectedOMPLoopDepth = ExpectedOMPLoopDepth;
2056 ExpectedOMPLoopDepth = Depth;
2058 EmitStmt(S);
2059 assert(OMPLoopNestStack.size() >= (size_t)Depth && "Found too few loops");
2061 // The last added loop is the outermost one.
2062 llvm::CanonicalLoopInfo *Result = OMPLoopNestStack.back();
2064 // Pop the \p Depth loops requested by the call from that stack and restore
2065 // the previous context.
2066 OMPLoopNestStack.pop_back_n(Depth);
2067 ExpectedOMPLoopDepth = ParentExpectedOMPLoopDepth;
2069 return Result;
2072 void CodeGenFunction::EmitOMPCanonicalLoop(const OMPCanonicalLoop *S) {
2073 const Stmt *SyntacticalLoop = S->getLoopStmt();
2074 if (!getLangOpts().OpenMPIRBuilder) {
2075 // If the OpenMPIRBuilder is not enabled, ignore the wrapper and emit the
     // loop statement directly.
2076 EmitStmt(SyntacticalLoop);
2077 return;
2080 LexicalScope ForScope(*this, S->getSourceRange());
2082 // Emit init statements. The Distance/LoopVar funcs may reference variable
2083 // declarations they contain.
2084 const Stmt *BodyStmt;
2085 if (const auto *For = dyn_cast<ForStmt>(SyntacticalLoop)) {
2086 if (const Stmt *InitStmt = For->getInit())
2087 EmitStmt(InitStmt);
2088 BodyStmt = For->getBody();
2089 } else if (const auto *RangeFor =
2090 dyn_cast<CXXForRangeStmt>(SyntacticalLoop)) {
2091 if (const DeclStmt *RangeStmt = RangeFor->getRangeStmt())
2092 EmitStmt(RangeStmt);
2093 if (const DeclStmt *BeginStmt = RangeFor->getBeginStmt())
2094 EmitStmt(BeginStmt);
2095 if (const DeclStmt *EndStmt = RangeFor->getEndStmt())
2096 EmitStmt(EndStmt);
2097 if (const DeclStmt *LoopVarStmt = RangeFor->getLoopVarStmt())
2098 EmitStmt(LoopVarStmt);
2099 BodyStmt = RangeFor->getBody();
2100 } else
2101 llvm_unreachable("Expected for-stmt or range-based for-stmt");
2103 // Emit closure for later use. By-value captures will be captured here.
2104 const CapturedStmt *DistanceFunc = S->getDistanceFunc();
2105 EmittedClosureTy DistanceClosure = emitCapturedStmtFunc(*this, DistanceFunc);
2106 const CapturedStmt *LoopVarFunc = S->getLoopVarFunc();
2107 EmittedClosureTy LoopVarClosure = emitCapturedStmtFunc(*this, LoopVarFunc);
2109 // Call the distance function to get the number of iterations of the loop to
2110 // come.
2111 QualType LogicalTy = DistanceFunc->getCapturedDecl()
2112 ->getParam(0)
2113 ->getType()
2114 .getNonReferenceType();
2115 RawAddress CountAddr = CreateMemTemp(LogicalTy, ".count.addr");
2116 emitCapturedStmtCall(*this, DistanceClosure, {CountAddr.getPointer()});
2117 llvm::Value *DistVal = Builder.CreateLoad(CountAddr, ".count");
2119 // Emit the loop structure.
2120 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
2121 auto BodyGen = [&, this](llvm::OpenMPIRBuilder::InsertPointTy CodeGenIP,
2122 llvm::Value *IndVar) {
2123 Builder.restoreIP(CodeGenIP);
2125 // Emit the loop body: Convert the logical iteration number to the loop
2126 // variable and emit the body.
2127 const DeclRefExpr *LoopVarRef = S->getLoopVarRef();
2128 LValue LCVal = EmitLValue(LoopVarRef);
2129 Address LoopVarAddress = LCVal.getAddress();
2130 emitCapturedStmtCall(*this, LoopVarClosure,
2131 {LoopVarAddress.emitRawPointer(*this), IndVar});
2133 RunCleanupsScope BodyScope(*this);
2134 EmitStmt(BodyStmt);
2135 return llvm::Error::success();
2138 llvm::Expected<llvm::CanonicalLoopInfo *> Result =
2139 OMPBuilder.createCanonicalLoop(Builder, BodyGen, DistVal);
2140 assert(Result && "unexpected error creating canonical loop");
2141 llvm::CanonicalLoopInfo *CL = *Result;
2143 // Finish up the loop.
2144 Builder.restoreIP(CL->getAfterIP());
2145 ForScope.ForceCleanup();
2147 // Remember the CanonicalLoopInfo for parent AST nodes consuming it.
2148 OMPLoopNestStack.push_back(CL);
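// Sketch of the two closures (assumed example): for
//
//   for (int i = 0; i < n; i += 2) BODY;
//
// the distance function stores the trip count ((n + 1) / 2 for positive n)
// through its output parameter, and the loop-variable function computes
// 'i = 0 + IndVar * 2' before each body invocation.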
2151 void CodeGenFunction::EmitOMPInnerLoop(
2152 const OMPExecutableDirective &S, bool RequiresCleanup, const Expr *LoopCond,
2153 const Expr *IncExpr,
2154 const llvm::function_ref<void(CodeGenFunction &)> BodyGen,
2155 const llvm::function_ref<void(CodeGenFunction &)> PostIncGen) {
2156 auto LoopExit = getJumpDestInCurrentScope("omp.inner.for.end");
2158 // Start the loop with a block that tests the condition.
2159 auto CondBlock = createBasicBlock("omp.inner.for.cond");
2160 EmitBlock(CondBlock);
2161 const SourceRange R = S.getSourceRange();
2163 // If attributes are attached, push the loop onto the stack with them.
2164 const auto &OMPED = cast<OMPExecutableDirective>(S);
2165 const CapturedStmt *ICS = OMPED.getInnermostCapturedStmt();
2166 const Stmt *SS = ICS->getCapturedStmt();
2167 const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(SS);
2168 OMPLoopNestStack.clear();
2169 if (AS)
2170 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(),
2171 AS->getAttrs(), SourceLocToDebugLoc(R.getBegin()),
2172 SourceLocToDebugLoc(R.getEnd()));
2173 else
2174 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
2175 SourceLocToDebugLoc(R.getEnd()));
2177 // If there are any cleanups between here and the loop-exit scope,
2178 // create a block to stage a loop exit along.
2179 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
2180 if (RequiresCleanup)
2181 ExitBlock = createBasicBlock("omp.inner.for.cond.cleanup");
2183 llvm::BasicBlock *LoopBody = createBasicBlock("omp.inner.for.body");
2185 // Emit condition.
2186 EmitBranchOnBoolExpr(LoopCond, LoopBody, ExitBlock, getProfileCount(&S));
2187 if (ExitBlock != LoopExit.getBlock()) {
2188 EmitBlock(ExitBlock);
2189 EmitBranchThroughCleanup(LoopExit);
2192 EmitBlock(LoopBody);
2193 incrementProfileCounter(&S);
2195 // Create a block for the increment.
2196 JumpDest Continue = getJumpDestInCurrentScope("omp.inner.for.inc");
2197 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
2199 BodyGen(*this);
2201 // Emit "IV = IV + 1" and a back-edge to the condition block.
2202 EmitBlock(Continue.getBlock());
2203 EmitIgnoredExpr(IncExpr);
2204 PostIncGen(*this);
2205 BreakContinueStack.pop_back();
2206 EmitBranch(CondBlock);
2207 LoopStack.pop();
2208 // Emit the fall-through block.
2209 EmitBlock(LoopExit.getBlock());
2212 bool CodeGenFunction::EmitOMPLinearClauseInit(const OMPLoopDirective &D) {
2213 if (!HaveInsertPoint())
2214 return false;
2215 // Emit inits for the linear variables.
2216 bool HasLinears = false;
2217 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
2218 for (const Expr *Init : C->inits()) {
2219 HasLinears = true;
2220 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(Init)->getDecl());
2221 if (const auto *Ref =
2222 dyn_cast<DeclRefExpr>(VD->getInit()->IgnoreImpCasts())) {
2223 AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
2224 const auto *OrigVD = cast<VarDecl>(Ref->getDecl());
2225 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
2226 CapturedStmtInfo->lookup(OrigVD) != nullptr,
2227 VD->getInit()->getType(), VK_LValue,
2228 VD->getInit()->getExprLoc());
2229 EmitExprAsInit(
2230 &DRE, VD,
2231 MakeAddrLValue(Emission.getAllocatedAddress(), VD->getType()),
2232 /*capturedByInit=*/false);
2233 EmitAutoVarCleanups(Emission);
2234 } else {
2235 EmitVarDecl(*VD);
2238 // Emit the linear steps for the linear clauses.
2239 // If a step is not constant, it is pre-calculated before the loop.
2240 if (const auto *CS = cast_or_null<BinaryOperator>(C->getCalcStep()))
2241 if (const auto *SaveRef = cast<DeclRefExpr>(CS->getLHS())) {
2242 EmitVarDecl(*cast<VarDecl>(SaveRef->getDecl()));
2243 // Emit calculation of the linear step.
2244 EmitIgnoredExpr(CS);
2247 return HasLinears;
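// e.g. (illustrative):
//
//   int k = 0;
//   #pragma omp simd linear(k: 2)
//   for (int i = 0; i < n; ++i)
//     a[k] = i;   // k behaves as k0 + i * 2
//
// The inits emitted above capture the initial value of 'k'; a non-constant
// step would additionally be pre-computed into a helper variable here.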
2250 void CodeGenFunction::EmitOMPLinearClauseFinal(
2251 const OMPLoopDirective &D,
2252 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
2253 if (!HaveInsertPoint())
2254 return;
2255 llvm::BasicBlock *DoneBB = nullptr;
2256 // Emit the final values of the linear variables.
2257 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
2258 auto IC = C->varlist_begin();
2259 for (const Expr *F : C->finals()) {
2260 if (!DoneBB) {
2261 if (llvm::Value *Cond = CondGen(*this)) {
2262 // If the first post-update expression is found, emit conditional
2263 // block if it was requested.
2264 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.linear.pu");
2265 DoneBB = createBasicBlock(".omp.linear.pu.done");
2266 Builder.CreateCondBr(Cond, ThenBB, DoneBB);
2267 EmitBlock(ThenBB);
2270 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IC)->getDecl());
2271 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
2272 CapturedStmtInfo->lookup(OrigVD) != nullptr,
2273 (*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
2274 Address OrigAddr = EmitLValue(&DRE).getAddress();
2275 CodeGenFunction::OMPPrivateScope VarScope(*this);
2276 VarScope.addPrivate(OrigVD, OrigAddr);
2277 (void)VarScope.Privatize();
2278 EmitIgnoredExpr(F);
2279 ++IC;
2281 if (const Expr *PostUpdate = C->getPostUpdateExpr())
2282 EmitIgnoredExpr(PostUpdate);
2284 if (DoneBB)
2285 EmitBlock(DoneBB, /*IsFinished=*/true);
2288 static void emitAlignedClause(CodeGenFunction &CGF,
2289 const OMPExecutableDirective &D) {
2290 if (!CGF.HaveInsertPoint())
2291 return;
2292 for (const auto *Clause : D.getClausesOfKind<OMPAlignedClause>()) {
2293 llvm::APInt ClauseAlignment(64, 0);
2294 if (const Expr *AlignmentExpr = Clause->getAlignment()) {
2295 auto *AlignmentCI =
2296 cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
2297 ClauseAlignment = AlignmentCI->getValue();
2299 for (const Expr *E : Clause->varlist()) {
2300 llvm::APInt Alignment(ClauseAlignment);
2301 if (Alignment == 0) {
2302 // OpenMP [2.8.1, Description]
2303 // If no optional parameter is specified, implementation-defined default
2304 // alignments for SIMD instructions on the target platforms are assumed.
2305 Alignment =
2306 CGF.getContext()
2307 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
2308 E->getType()->getPointeeType()))
2309 .getQuantity();
2311 assert((Alignment == 0 || Alignment.isPowerOf2()) &&
2312 "alignment is not power of 2");
2313 if (Alignment != 0) {
2314 llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
2315 CGF.emitAlignmentAssumption(
2316 PtrValue, E, /*No second loc needed*/ SourceLocation(),
2317 llvm::ConstantInt::get(CGF.getLLVMContext(), Alignment));
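// For example (illustrative): '#pragma omp simd aligned(p: 64)' emits an
// alignment assumption on 'p', roughly
//
//   call void @llvm.assume(i1 true) [ "align"(ptr %p, i64 64) ]
//
// With no explicit alignment, the target's default simd alignment for the
// pointee type is assumed instead.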
2323 void CodeGenFunction::EmitOMPPrivateLoopCounters(
2324 const OMPLoopDirective &S, CodeGenFunction::OMPPrivateScope &LoopScope) {
2325 if (!HaveInsertPoint())
2326 return;
2327 auto I = S.private_counters().begin();
2328 for (const Expr *E : S.counters()) {
2329 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2330 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(*I)->getDecl());
2331 // Emit var without initialization.
2332 AutoVarEmission VarEmission = EmitAutoVarAlloca(*PrivateVD);
2333 EmitAutoVarCleanups(VarEmission);
2334 LocalDeclMap.erase(PrivateVD);
2335 (void)LoopScope.addPrivate(VD, VarEmission.getAllocatedAddress());
2336 if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) ||
2337 VD->hasGlobalStorage()) {
2338 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD),
2339 LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
2340 E->getType(), VK_LValue, E->getExprLoc());
2341 (void)LoopScope.addPrivate(PrivateVD, EmitLValue(&DRE).getAddress());
2342 } else {
2343 (void)LoopScope.addPrivate(PrivateVD, VarEmission.getAllocatedAddress());
2345 ++I;
2347 // Privatize extra loop counters used in loops for ordered(n) clauses.
2348 for (const auto *C : S.getClausesOfKind<OMPOrderedClause>()) {
2349 if (!C->getNumForLoops())
2350 continue;
2351 for (unsigned I = S.getLoopsNumber(), E = C->getLoopNumIterations().size();
2352 I < E; ++I) {
2353 const auto *DRE = cast<DeclRefExpr>(C->getLoopCounter(I));
2354 const auto *VD = cast<VarDecl>(DRE->getDecl());
2355 // Override only those variables that can be captured to avoid re-emission
2356 // of the variables declared within the loops.
2357 if (DRE->refersToEnclosingVariableOrCapture()) {
2358 (void)LoopScope.addPrivate(
2359 VD, CreateMemTemp(DRE->getType(), VD->getName()));
2365 static void emitPreCond(CodeGenFunction &CGF, const OMPLoopDirective &S,
2366 const Expr *Cond, llvm::BasicBlock *TrueBlock,
2367 llvm::BasicBlock *FalseBlock, uint64_t TrueCount) {
2368 if (!CGF.HaveInsertPoint())
2369 return;
2371 CodeGenFunction::OMPPrivateScope PreCondScope(CGF);
2372 CGF.EmitOMPPrivateLoopCounters(S, PreCondScope);
2373 (void)PreCondScope.Privatize();
2374 // Get initial values of real counters.
2375 for (const Expr *I : S.inits()) {
2376 CGF.EmitIgnoredExpr(I);
2379 // Create temp loop control variables with their init values to support
2380 // non-rectangular loops.
2381 CodeGenFunction::OMPMapVars PreCondVars;
2382 for (const Expr *E : S.dependent_counters()) {
2383 if (!E)
2384 continue;
2385 assert(!E->getType().getNonReferenceType()->isRecordType() &&
2386 "dependent counter must not be an iterator.");
2387 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2388 Address CounterAddr =
2389 CGF.CreateMemTemp(VD->getType().getNonReferenceType());
2390 (void)PreCondVars.setVarAddr(CGF, VD, CounterAddr);
2392 (void)PreCondVars.apply(CGF);
2393 for (const Expr *E : S.dependent_inits()) {
2394 if (!E)
2395 continue;
2396 CGF.EmitIgnoredExpr(E);
2398 // Check that the loop is executed at least once.
2399 CGF.EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount);
2400 PreCondVars.restore(CGF);
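// Illustrative precondition (assumed example): for
//
//   #pragma omp for
//   for (int i = lo; i < hi; ++i) ...
//
// Cond is essentially 'lo < hi', so the whole loop machinery is skipped when
// the loop would execute zero iterations.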
2403 void CodeGenFunction::EmitOMPLinearClause(
2404 const OMPLoopDirective &D, CodeGenFunction::OMPPrivateScope &PrivateScope) {
2405 if (!HaveInsertPoint())
2406 return;
2407 llvm::DenseSet<const VarDecl *> SIMDLCVs;
2408 OpenMPDirectiveKind EKind = getEffectiveDirectiveKind(D);
2409 if (isOpenMPSimdDirective(EKind)) {
2410 const auto *LoopDirective = cast<OMPLoopDirective>(&D);
2411 for (const Expr *C : LoopDirective->counters()) {
2412 SIMDLCVs.insert(
2413 cast<VarDecl>(cast<DeclRefExpr>(C)->getDecl())->getCanonicalDecl());
2416 for (const auto *C : D.getClausesOfKind<OMPLinearClause>()) {
2417 auto CurPrivate = C->privates().begin();
2418 for (const Expr *E : C->varlist()) {
2419 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
2420 const auto *PrivateVD =
2421 cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
2422 if (!SIMDLCVs.count(VD->getCanonicalDecl())) {
2423 // Emit private VarDecl with copy init.
2424 EmitVarDecl(*PrivateVD);
2425 bool IsRegistered =
2426 PrivateScope.addPrivate(VD, GetAddrOfLocalVar(PrivateVD));
2427 assert(IsRegistered && "linear var already registered as private");
2428 // Silence the warning about unused variable.
2429 (void)IsRegistered;
2430 } else {
2431 EmitVarDecl(*PrivateVD);
2433 ++CurPrivate;
2438 static void emitSimdlenSafelenClause(CodeGenFunction &CGF,
2439 const OMPExecutableDirective &D) {
2440 if (!CGF.HaveInsertPoint())
2441 return;
2442 if (const auto *C = D.getSingleClause<OMPSimdlenClause>()) {
2443 RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
2444 /*ignoreResult=*/true);
2445 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
2446 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
2447 // In presence of finite 'safelen', it may be unsafe to mark all
2448 // the memory instructions parallel, because loop-carried
2449 // dependences of 'safelen' iterations are possible.
2450 CGF.LoopStack.setParallel(!D.getSingleClause<OMPSafelenClause>());
2451 } else if (const auto *C = D.getSingleClause<OMPSafelenClause>()) {
2452 RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
2453 /*ignoreResult=*/true);
2454 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
2455 CGF.LoopStack.setVectorizeWidth(Val->getZExtValue());
2456 // In presence of finite 'safelen', it may be unsafe to mark all
2457 // the memory instructions parallel, because loop-carried
2458 // dependences of 'safelen' iterations are possible.
2459 CGF.LoopStack.setParallel(/*Enable=*/false);
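// e.g. (illustrative): 'safelen(8)' promises only that iterations at a
// distance < 8 are free of loop-carried dependences, so the loop gets
// vectorize.width 8 but is not marked parallel; 'simdlen(8)' without safelen
// keeps the parallel marking.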
2463 void CodeGenFunction::EmitOMPSimdInit(const OMPLoopDirective &D) {
2464 // Walk clauses and process simdlen/safelen and related simd hints.
2465 LoopStack.setParallel(/*Enable=*/true);
2466 LoopStack.setVectorizeEnable();
2467 emitSimdlenSafelenClause(*this, D);
2468 if (const auto *C = D.getSingleClause<OMPOrderClause>())
2469 if (C->getKind() == OMPC_ORDER_concurrent)
2470 LoopStack.setParallel(/*Enable=*/true);
2471 OpenMPDirectiveKind EKind = getEffectiveDirectiveKind(D);
2472 if ((EKind == OMPD_simd ||
2473 (getLangOpts().OpenMPSimd && isOpenMPSimdDirective(EKind))) &&
2474 llvm::any_of(D.getClausesOfKind<OMPReductionClause>(),
2475 [](const OMPReductionClause *C) {
2476 return C->getModifier() == OMPC_REDUCTION_inscan;
2478 // Disable parallel access in case of prefix sum.
2479 LoopStack.setParallel(/*Enable=*/false);
2482 void CodeGenFunction::EmitOMPSimdFinal(
2483 const OMPLoopDirective &D,
2484 const llvm::function_ref<llvm::Value *(CodeGenFunction &)> CondGen) {
2485 if (!HaveInsertPoint())
2486 return;
2487 llvm::BasicBlock *DoneBB = nullptr;
2488 auto IC = D.counters().begin();
2489 auto IPC = D.private_counters().begin();
2490 for (const Expr *F : D.finals()) {
2491 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>((*IC))->getDecl());
2492 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>((*IPC))->getDecl());
2493 const auto *CED = dyn_cast<OMPCapturedExprDecl>(OrigVD);
2494 if (LocalDeclMap.count(OrigVD) || CapturedStmtInfo->lookup(OrigVD) ||
2495 OrigVD->hasGlobalStorage() || CED) {
2496 if (!DoneBB) {
2497 if (llvm::Value *Cond = CondGen(*this)) {
2498 // If the first post-update expression is found, emit conditional
2499 // block if it was requested.
2500 llvm::BasicBlock *ThenBB = createBasicBlock(".omp.final.then");
2501 DoneBB = createBasicBlock(".omp.final.done");
2502 Builder.CreateCondBr(Cond, ThenBB, DoneBB);
2503 EmitBlock(ThenBB);
2506 Address OrigAddr = Address::invalid();
2507 if (CED) {
2508 OrigAddr = EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress();
2509 } else {
2510 DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD),
2511 /*RefersToEnclosingVariableOrCapture=*/false,
2512 (*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc());
2513 OrigAddr = EmitLValue(&DRE).getAddress();
2515 OMPPrivateScope VarScope(*this);
2516 VarScope.addPrivate(OrigVD, OrigAddr);
2517 (void)VarScope.Privatize();
2518 EmitIgnoredExpr(F);
2520 ++IC;
2521 ++IPC;
2523 if (DoneBB)
2524 EmitBlock(DoneBB, /*IsFinished=*/true);
2527 static void emitOMPLoopBodyWithStopPoint(CodeGenFunction &CGF,
2528 const OMPLoopDirective &S,
2529 CodeGenFunction::JumpDest LoopExit) {
2530 CGF.EmitOMPLoopBody(S, LoopExit);
2531 CGF.EmitStopPoint(&S);
2534 /// Emit a helper variable and return the corresponding lvalue.
2535 static LValue EmitOMPHelperVar(CodeGenFunction &CGF,
2536 const DeclRefExpr *Helper) {
2537 auto VDecl = cast<VarDecl>(Helper->getDecl());
2538 CGF.EmitVarDecl(*VDecl);
2539 return CGF.EmitLValue(Helper);
2542 static void emitCommonSimdLoop(CodeGenFunction &CGF, const OMPLoopDirective &S,
2543 const RegionCodeGenTy &SimdInitGen,
2544 const RegionCodeGenTy &BodyCodeGen) {
2545 auto &&ThenGen = [&S, &SimdInitGen, &BodyCodeGen](CodeGenFunction &CGF,
2546 PrePostActionTy &) {
2547 CGOpenMPRuntime::NontemporalDeclsRAII NontemporalsRegion(CGF.CGM, S);
2548 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
2549 SimdInitGen(CGF);
2551 BodyCodeGen(CGF);
2553 auto &&ElseGen = [&BodyCodeGen](CodeGenFunction &CGF, PrePostActionTy &) {
2554 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
2555 CGF.LoopStack.setVectorizeEnable(/*Enable=*/false);
2557 BodyCodeGen(CGF);
2559 const Expr *IfCond = nullptr;
2560 OpenMPDirectiveKind EKind = getEffectiveDirectiveKind(S);
2561 if (isOpenMPSimdDirective(EKind)) {
2562 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
2563 if (CGF.getLangOpts().OpenMP >= 50 &&
2564 (C->getNameModifier() == OMPD_unknown ||
2565 C->getNameModifier() == OMPD_simd)) {
2566 IfCond = C->getCondition();
2567 break;
2571 if (IfCond) {
2572 CGF.CGM.getOpenMPRuntime().emitIfClause(CGF, IfCond, ThenGen, ElseGen);
2573 } else {
2574 RegionCodeGenTy ThenRCG(ThenGen);
2575 ThenRCG(CGF);
2579 static void emitOMPSimdRegion(CodeGenFunction &CGF, const OMPLoopDirective &S,
2580 PrePostActionTy &Action) {
2581 Action.Enter(CGF);
2582 OMPLoopScope PreInitScope(CGF, S);
2583 // if (PreCond) {
2584 // for (IV in 0..LastIteration) BODY;
2585 // <Final counter/linear vars updates>;
2586 // }
2588 // The presence of lower/upper bound variable depends on the actual directive
2589 // kind in the AST node. The variables must be emitted because some of the
2590 // expressions associated with the loop will use them.
2591 OpenMPDirectiveKind DKind = S.getDirectiveKind();
2592 if (isOpenMPDistributeDirective(DKind) ||
2593 isOpenMPWorksharingDirective(DKind) || isOpenMPTaskLoopDirective(DKind) ||
2594 isOpenMPGenericLoopDirective(DKind)) {
2595 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()));
2596 (void)EmitOMPHelperVar(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()));
2599 OpenMPDirectiveKind EKind = getEffectiveDirectiveKind(S);
2600 // Emit: if (PreCond) - begin.
2601 // If the condition constant folds and can be elided, avoid emitting the
2602 // whole loop.
2603 bool CondConstant;
2604 llvm::BasicBlock *ContBlock = nullptr;
2605 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
2606 if (!CondConstant)
2607 return;
2608 } else {
2609 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("simd.if.then");
2610 ContBlock = CGF.createBasicBlock("simd.if.end");
2611 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
2612 CGF.getProfileCount(&S));
2613 CGF.EmitBlock(ThenBlock);
2614 CGF.incrementProfileCounter(&S);
2617 // Emit the loop iteration variable.
2618 const Expr *IVExpr = S.getIterationVariable();
2619 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
2620 CGF.EmitVarDecl(*IVDecl);
2621 CGF.EmitIgnoredExpr(S.getInit());
2623 // Emit the iterations count variable.
2624 // If it is not a variable, Sema decided to calculate the iteration count on
2625 // each iteration (e.g., it is foldable into a constant).
2626 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
2627 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
2628 // Emit calculation of the iterations count.
2629 CGF.EmitIgnoredExpr(S.getCalcLastIteration());
2632 emitAlignedClause(CGF, S);
2633 (void)CGF.EmitOMPLinearClauseInit(S);
2635 CodeGenFunction::OMPPrivateScope LoopScope(CGF);
2636 CGF.EmitOMPPrivateClause(S, LoopScope);
2637 CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
2638 CGF.EmitOMPLinearClause(S, LoopScope);
2639 CGF.EmitOMPReductionClauseInit(S, LoopScope);
2640 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(
2641 CGF, S, CGF.EmitLValue(S.getIterationVariable()));
2642 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
2643 (void)LoopScope.Privatize();
2644 if (isOpenMPTargetExecutionDirective(EKind))
2645 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
2647 emitCommonSimdLoop(
2648 CGF, S,
2649 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
2650 CGF.EmitOMPSimdInit(S);
2652 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
2653 CGF.EmitOMPInnerLoop(
2654 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
2655 [&S](CodeGenFunction &CGF) {
2656 emitOMPLoopBodyWithStopPoint(CGF, S,
2657 CodeGenFunction::JumpDest());
2659 [](CodeGenFunction &) {});
2661 CGF.EmitOMPSimdFinal(S, [](CodeGenFunction &) { return nullptr; });
2662 // Emit final copy of the lastprivate variables at the end of loops.
2663 if (HasLastprivateClause)
2664 CGF.EmitOMPLastprivateClauseFinal(S, /*NoFinals=*/true);
2665 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_simd);
2666 emitPostUpdateForReductionClause(CGF, S,
2667 [](CodeGenFunction &) { return nullptr; });
2668 LoopScope.restoreMap();
2669 CGF.EmitOMPLinearClauseFinal(S, [](CodeGenFunction &) { return nullptr; });
2671 // Emit: if (PreCond) - end.
2672 if (ContBlock) {
2673 CGF.EmitBranch(ContBlock);
2674 CGF.EmitBlock(ContBlock, true);
2678 // Pass OMPLoopDirective (instead of OMPSimdDirective) to make this function
2679 // available for "loop bind(thread)", which maps to "simd".
2680 static bool isSimdSupportedByOpenMPIRBuilder(const OMPLoopDirective &S) {
2681 // Check for unsupported clauses.
2682 for (OMPClause *C : S.clauses()) {
2683 // Currently only the order, simdlen, safelen and aligned clauses are supported.
2684 if (!(isa<OMPSimdlenClause>(C) || isa<OMPSafelenClause>(C) ||
2685 isa<OMPOrderClause>(C) || isa<OMPAlignedClause>(C)))
2686 return false;
2689 // Check if we have a statement with the ordered directive.
2690 // Visit the statement hierarchy to find a compound statement
2692 // with an ordered directive in it.
2692 if (const auto *CanonLoop = dyn_cast<OMPCanonicalLoop>(S.getRawStmt())) {
2693 if (const Stmt *SyntacticalLoop = CanonLoop->getLoopStmt()) {
2694 for (const Stmt *SubStmt : SyntacticalLoop->children()) {
2695 if (!SubStmt)
2696 continue;
2697 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(SubStmt)) {
2698 for (const Stmt *CSSubStmt : CS->children()) {
2699 if (!CSSubStmt)
2700 continue;
2701 if (isa<OMPOrderedDirective>(CSSubStmt)) {
2702 return false;
2709 return true;
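// An example the helper rejects (illustrative):
//
//   #pragma omp simd
//   for (int i = 0; i < n; ++i) {
//     #pragma omp ordered simd
//     { ... }
//   }
//
// The ordered directive inside the loop body forces the classic codegen path
// instead of the OpenMPIRBuilder.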
2712 static llvm::MapVector<llvm::Value *, llvm::Value *>
2713 GetAlignedMapping(const OMPLoopDirective &S, CodeGenFunction &CGF) {
2714 llvm::MapVector<llvm::Value *, llvm::Value *> AlignedVars;
2715 for (const auto *Clause : S.getClausesOfKind<OMPAlignedClause>()) {
2716 llvm::APInt ClauseAlignment(64, 0);
2717 if (const Expr *AlignmentExpr = Clause->getAlignment()) {
2718 auto *AlignmentCI =
2719 cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AlignmentExpr));
2720 ClauseAlignment = AlignmentCI->getValue();
2722 for (const Expr *E : Clause->varlist()) {
2723 llvm::APInt Alignment(ClauseAlignment);
2724 if (Alignment == 0) {
2725 // OpenMP [2.8.1, Description]
2726 // If no optional parameter is specified, implementation-defined default
2727 // alignments for SIMD instructions on the target platforms are assumed.
2728 Alignment =
2729 CGF.getContext()
2730 .toCharUnitsFromBits(CGF.getContext().getOpenMPDefaultSimdAlign(
2731 E->getType()->getPointeeType()))
2732 .getQuantity();
2734 assert((Alignment == 0 || Alignment.isPowerOf2()) &&
2735 "alignment is not power of 2");
2736 llvm::Value *PtrValue = CGF.EmitScalarExpr(E);
2737 AlignedVars[PtrValue] = CGF.Builder.getInt64(Alignment.getSExtValue());
2740 return AlignedVars;
2743 // Pass OMPLoopDirective (instead of OMPSimdDirective) to make this function
2744 // available for "loop bind(thread)", which maps to "simd".
2745 static void emitOMPSimdDirective(const OMPLoopDirective &S,
2746 CodeGenFunction &CGF, CodeGenModule &CGM) {
2747 bool UseOMPIRBuilder =
2748 CGM.getLangOpts().OpenMPIRBuilder && isSimdSupportedByOpenMPIRBuilder(S);
2749 if (UseOMPIRBuilder) {
2750 auto &&CodeGenIRBuilder = [&S, &CGM, UseOMPIRBuilder](CodeGenFunction &CGF,
2751 PrePostActionTy &) {
2752 // Use the OpenMPIRBuilder if enabled.
2753 if (UseOMPIRBuilder) {
2754 llvm::MapVector<llvm::Value *, llvm::Value *> AlignedVars =
2755 GetAlignedMapping(S, CGF);
2756 // Emit the associated statement and get its loop representation.
2757 const Stmt *Inner = S.getRawStmt();
2758 llvm::CanonicalLoopInfo *CLI =
2759 CGF.EmitOMPCollapsedCanonicalLoopNest(Inner, 1);
2761 llvm::OpenMPIRBuilder &OMPBuilder =
2762 CGM.getOpenMPRuntime().getOMPBuilder();
2763 // Add SIMD specific metadata
2764 llvm::ConstantInt *Simdlen = nullptr;
2765 if (const auto *C = S.getSingleClause<OMPSimdlenClause>()) {
2766 RValue Len = CGF.EmitAnyExpr(C->getSimdlen(), AggValueSlot::ignored(),
2767 /*ignoreResult=*/true);
2768 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
2769 Simdlen = Val;
2771 llvm::ConstantInt *Safelen = nullptr;
2772 if (const auto *C = S.getSingleClause<OMPSafelenClause>()) {
2773 RValue Len = CGF.EmitAnyExpr(C->getSafelen(), AggValueSlot::ignored(),
2774 /*ignoreResult=*/true);
2775 auto *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
2776 Safelen = Val;
2778 llvm::omp::OrderKind Order = llvm::omp::OrderKind::OMP_ORDER_unknown;
2779 if (const auto *C = S.getSingleClause<OMPOrderClause>()) {
2780 if (C->getKind() == OpenMPOrderClauseKind::OMPC_ORDER_concurrent) {
2781 Order = llvm::omp::OrderKind::OMP_ORDER_concurrent;
2784 // Add simd metadata to the collapsed loop. Do not generate
2785 // another loop for the if clause; that case was handled earlier.
2786 OMPBuilder.applySimd(CLI, AlignedVars,
2787 /*IfCond*/ nullptr, Order, Simdlen, Safelen);
2788 return;
2792 auto LPCRegion =
2793 CGOpenMPRuntime::LastprivateConditionalRAII::disable(CGF, S);
2794 OMPLexicalScope Scope(CGF, S, OMPD_unknown);
2795 CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_simd,
2796 CodeGenIRBuilder);
2798 return;
2801 CodeGenFunction::ParentLoopDirectiveForScanRegion ScanRegion(CGF, S);
2802 CGF.OMPFirstScanLoop = true;
2803 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
2804 emitOMPSimdRegion(CGF, S, Action);
2807 auto LPCRegion =
2808 CGOpenMPRuntime::LastprivateConditionalRAII::disable(CGF, S);
2809 OMPLexicalScope Scope(CGF, S, OMPD_unknown);
2810 CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_simd, CodeGen);
2812 // Check for outer lastprivate conditional update.
2813 checkForLastprivateConditionalUpdate(CGF, S);
2816 void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
2817 emitOMPSimdDirective(S, *this, CGM);
2820 void CodeGenFunction::EmitOMPTileDirective(const OMPTileDirective &S) {
2821 // Emit the de-sugared statement.
2822 OMPTransformDirectiveScopeRAII TileScope(*this, &S);
2823 EmitStmt(S.getTransformedStmt());
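// For instance (illustrative), '#pragma omp tile sizes(16, 16)' over a 2-D
// loop nest is already de-sugared by Sema into floor and tile loops; codegen
// only has to emit that transformed AST.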
2826 void CodeGenFunction::EmitOMPReverseDirective(const OMPReverseDirective &S) {
2827 // Emit the de-sugared statement.
2828 OMPTransformDirectiveScopeRAII ReverseScope(*this, &S);
2829 EmitStmt(S.getTransformedStmt());
2832 void CodeGenFunction::EmitOMPInterchangeDirective(
2833 const OMPInterchangeDirective &S) {
2834 // Emit the de-sugared statement.
2835 OMPTransformDirectiveScopeRAII InterchangeScope(*this, &S);
2836 EmitStmt(S.getTransformedStmt());
2839 void CodeGenFunction::EmitOMPUnrollDirective(const OMPUnrollDirective &S) {
2840 bool UseOMPIRBuilder = CGM.getLangOpts().OpenMPIRBuilder;
2842 if (UseOMPIRBuilder) {
2843 auto DL = SourceLocToDebugLoc(S.getBeginLoc());
2844 const Stmt *Inner = S.getRawStmt();
2846 // Consume nested loop. Clear the entire remaining loop stack because a
2847 // fully unrolled loop is non-transformable. For partial unrolling the
2848 // generated outer loop is pushed back to the stack.
2849 llvm::CanonicalLoopInfo *CLI = EmitOMPCollapsedCanonicalLoopNest(Inner, 1);
2850 OMPLoopNestStack.clear();
2852 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
2854 bool NeedsUnrolledCLI = ExpectedOMPLoopDepth >= 1;
2855 llvm::CanonicalLoopInfo *UnrolledCLI = nullptr;
2857 if (S.hasClausesOfKind<OMPFullClause>()) {
2858 assert(ExpectedOMPLoopDepth == 0);
2859 OMPBuilder.unrollLoopFull(DL, CLI);
2860 } else if (auto *PartialClause = S.getSingleClause<OMPPartialClause>()) {
2861 uint64_t Factor = 0;
2862 if (Expr *FactorExpr = PartialClause->getFactor()) {
2863 Factor = FactorExpr->EvaluateKnownConstInt(getContext()).getZExtValue();
2864 assert(Factor >= 1 && "Only positive factors are valid");
2866 OMPBuilder.unrollLoopPartial(DL, CLI, Factor,
2867 NeedsUnrolledCLI ? &UnrolledCLI : nullptr);
2868 } else {
2869 OMPBuilder.unrollLoopHeuristic(DL, CLI);
2872 assert((!NeedsUnrolledCLI || UnrolledCLI) &&
2873 "NeedsUnrolledCLI implies UnrolledCLI to be set");
2874 if (UnrolledCLI)
2875 OMPLoopNestStack.push_back(UnrolledCLI);
2877 return;
2880 // This function is only called if the unrolled loop is not consumed by any
2881 // other loop-associated construct. Such a loop-associated construct will have
2882 // used the transformed AST.
2884 // Set the unroll metadata for the next emitted loop.
2885 LoopStack.setUnrollState(LoopAttributes::Enable);
2887 if (S.hasClausesOfKind<OMPFullClause>()) {
2888 LoopStack.setUnrollState(LoopAttributes::Full);
2889 } else if (auto *PartialClause = S.getSingleClause<OMPPartialClause>()) {
2890 if (Expr *FactorExpr = PartialClause->getFactor()) {
2891 uint64_t Factor =
2892 FactorExpr->EvaluateKnownConstInt(getContext()).getZExtValue();
2893 assert(Factor >= 1 && "Only positive factors are valid");
2894 LoopStack.setUnrollCount(Factor);
2898 EmitStmt(S.getAssociatedStmt());
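// e.g. (illustrative): '#pragma omp unroll partial(4)' on this fallback path
// simply sets llvm.loop.unroll.count 4 on the next emitted loop, leaving the
// actual unrolling to the LLVM pass pipeline.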
2901 void CodeGenFunction::EmitOMPOuterLoop(
2902 bool DynamicOrOrdered, bool IsMonotonic, const OMPLoopDirective &S,
2903 CodeGenFunction::OMPPrivateScope &LoopScope,
2904 const CodeGenFunction::OMPLoopArguments &LoopArgs,
2905 const CodeGenFunction::CodeGenLoopTy &CodeGenLoop,
2906 const CodeGenFunction::CodeGenOrderedTy &CodeGenOrdered) {
2907 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
2909 const Expr *IVExpr = S.getIterationVariable();
2910 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
2911 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
2913 JumpDest LoopExit = getJumpDestInCurrentScope("omp.dispatch.end");
2915 // Start the loop with a block that tests the condition.
2916 llvm::BasicBlock *CondBlock = createBasicBlock("omp.dispatch.cond");
2917 EmitBlock(CondBlock);
2918 const SourceRange R = S.getSourceRange();
2919 OMPLoopNestStack.clear();
2920 LoopStack.push(CondBlock, SourceLocToDebugLoc(R.getBegin()),
2921 SourceLocToDebugLoc(R.getEnd()));
2923 llvm::Value *BoolCondVal = nullptr;
2924 if (!DynamicOrOrdered) {
2925 // UB = min(UB, GlobalUB) or
2926 // UB = min(UB, PrevUB) for combined loop sharing constructs (e.g.
2927 // 'distribute parallel for')
2928 EmitIgnoredExpr(LoopArgs.EUB);
2929 // IV = LB
2930 EmitIgnoredExpr(LoopArgs.Init);
2931 // IV < UB
2932 BoolCondVal = EvaluateExprAsBool(LoopArgs.Cond);
2933 } else {
2934 BoolCondVal =
2935 RT.emitForNext(*this, S.getBeginLoc(), IVSize, IVSigned, LoopArgs.IL,
2936 LoopArgs.LB, LoopArgs.UB, LoopArgs.ST);
2939 // If there are any cleanups between here and the loop-exit scope,
2940 // create a block to stage the loop exit.
2941 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
2942 if (LoopScope.requiresCleanups())
2943 ExitBlock = createBasicBlock("omp.dispatch.cleanup");
2945 llvm::BasicBlock *LoopBody = createBasicBlock("omp.dispatch.body");
2946 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
2947 if (ExitBlock != LoopExit.getBlock()) {
2948 EmitBlock(ExitBlock);
2949 EmitBranchThroughCleanup(LoopExit);
2951 EmitBlock(LoopBody);
2953 // Emit "IV = LB" (for a static schedule, we have already calculated the
2954 // new LB for the loop condition and emitted it above).
2955 if (DynamicOrOrdered)
2956 EmitIgnoredExpr(LoopArgs.Init);
2958 // Create a block for the increment.
2959 JumpDest Continue = getJumpDestInCurrentScope("omp.dispatch.inc");
2960 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
2962 OpenMPDirectiveKind EKind = getEffectiveDirectiveKind(S);
2963 emitCommonSimdLoop(
2964 *this, S,
2965 [&S, IsMonotonic, EKind](CodeGenFunction &CGF, PrePostActionTy &) {
2966 // Generate !llvm.loop.parallel metadata for loads and stores for loops
2967 // with dynamic/guided scheduling and without an ordered clause.
2968 if (!isOpenMPSimdDirective(EKind)) {
2969 CGF.LoopStack.setParallel(!IsMonotonic);
2970 if (const auto *C = S.getSingleClause<OMPOrderClause>())
2971 if (C->getKind() == OMPC_ORDER_concurrent)
2972 CGF.LoopStack.setParallel(/*Enable=*/true);
2973 } else {
2974 CGF.EmitOMPSimdInit(S);
2977 [&S, &LoopArgs, LoopExit, &CodeGenLoop, IVSize, IVSigned, &CodeGenOrdered,
2978 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
2979 SourceLocation Loc = S.getBeginLoc();
2980 // when 'distribute' is not combined with a 'for':
2981 // while (idx <= UB) { BODY; ++idx; }
2982 // when 'distribute' is combined with a 'for'
2983 // (e.g. 'distribute parallel for')
2984 // while (idx <= UB) { <CodeGen rest of pragma>; idx += ST; }
2985 CGF.EmitOMPInnerLoop(
2986 S, LoopScope.requiresCleanups(), LoopArgs.Cond, LoopArgs.IncExpr,
2987 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
2988 CodeGenLoop(CGF, S, LoopExit);
2990 [IVSize, IVSigned, Loc, &CodeGenOrdered](CodeGenFunction &CGF) {
2991 CodeGenOrdered(CGF, Loc, IVSize, IVSigned);
2995 EmitBlock(Continue.getBlock());
2996 BreakContinueStack.pop_back();
2997 if (!DynamicOrOrdered) {
2998 // Emit "LB = LB + Stride", "UB = UB + Stride".
2999 EmitIgnoredExpr(LoopArgs.NextLB);
3000 EmitIgnoredExpr(LoopArgs.NextUB);
3003 EmitBranch(CondBlock);
3004 OMPLoopNestStack.clear();
3005 LoopStack.pop();
3006 // Emit the fall-through block.
3007 EmitBlock(LoopExit.getBlock());
3009 // Tell the runtime we are done.
3010 auto &&CodeGen = [DynamicOrOrdered, &S, &LoopArgs](CodeGenFunction &CGF) {
3011 if (!DynamicOrOrdered)
3012 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
3013 LoopArgs.DKind);
3015 OMPCancelStack.emitExit(*this, EKind, CodeGen);
3018 void CodeGenFunction::EmitOMPForOuterLoop(
3019 const OpenMPScheduleTy &ScheduleKind, bool IsMonotonic,
3020 const OMPLoopDirective &S, OMPPrivateScope &LoopScope, bool Ordered,
3021 const OMPLoopArguments &LoopArgs,
3022 const CodeGenDispatchBoundsTy &CGDispatchBounds) {
3023 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
3025 // Dynamic scheduling of the outer loop (dynamic, guided, auto, runtime).
3026 const bool DynamicOrOrdered = Ordered || RT.isDynamic(ScheduleKind.Schedule);
3028 assert((Ordered || !RT.isStaticNonchunked(ScheduleKind.Schedule,
3029 LoopArgs.Chunk != nullptr)) &&
3030 "static non-chunked schedule does not need outer loop");
3032 // Emit outer loop.
3034 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
3035 // When schedule(dynamic,chunk_size) is specified, the iterations are
3036 // distributed to threads in the team in chunks as the threads request them.
3037 // Each thread executes a chunk of iterations, then requests another chunk,
3038 // until no chunks remain to be distributed. Each chunk contains chunk_size
3039 // iterations, except for the last chunk to be distributed, which may have
3040 // fewer iterations. When no chunk_size is specified, it defaults to 1.
3042 // When schedule(guided,chunk_size) is specified, the iterations are assigned
3043 // to threads in the team in chunks as the executing threads request them.
3044 // Each thread executes a chunk of iterations, then requests another chunk,
3045 // until no chunks remain to be assigned. For a chunk_size of 1, the size of
3046 // each chunk is proportional to the number of unassigned iterations divided
3047 // by the number of threads in the team, decreasing to 1. For a chunk_size
3048 // with value k (greater than 1), the size of each chunk is determined in the
3049 // same way, with the restriction that the chunks do not contain fewer than k
3050 // iterations (except for the last chunk to be assigned, which may have fewer
3051 // than k iterations).
3053 // When schedule(auto) is specified, the decision regarding scheduling is
3054 // delegated to the compiler and/or runtime system. The programmer gives the
3055 // implementation the freedom to choose any possible mapping of iterations to
3056 // threads in the team.
3058 // When schedule(runtime) is specified, the decision regarding scheduling is
3059 // deferred until run time, and the schedule and chunk size are taken from the
3060 // run-sched-var ICV. If the ICV is set to auto, the schedule is
3061 // implementation defined.
3063 // __kmpc_dispatch_init();
3064 // while(__kmpc_dispatch_next(&LB, &UB)) {
3065 // idx = LB;
3066 // while (idx <= UB) { BODY; ++idx;
3067 // __kmpc_dispatch_fini_(4|8)[u](); // For ordered loops only.
3068 // } // inner loop
3069 // }
3070 // __kmpc_dispatch_deinit();
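// Illustrative example (not part of the original comment): a loop such as
//   #pragma omp for schedule(dynamic, 4)
//   for (int i = 0; i < n; ++i) work(i);
// takes this dispatch_init/dispatch_next path, with each thread repeatedly
// requesting chunks of 4 iterations from the runtime until none remain.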
3072 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
3073 // When schedule(static, chunk_size) is specified, iterations are divided into
3074 // chunks of size chunk_size, and the chunks are assigned to the threads in
3075 // the team in a round-robin fashion in the order of the thread number.
3077 // while(UB = min(UB, GlobalUB), idx = LB, idx < UB) {
3078 // while (idx <= UB) { BODY; ++idx; } // inner loop
3079 // LB = LB + ST;
3080 // UB = UB + ST;
3081 // }
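// Illustrative example (assumption): with
//   #pragma omp for schedule(static, 8)
//   for (int i = 0; i < n; ++i) work(i);
// each thread walks its round-robin chunks of 8 iterations, advancing LB and
// UB by the stride on every pass of the outer loop above.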
3084 const Expr *IVExpr = S.getIterationVariable();
3085 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
3086 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
3088 if (DynamicOrOrdered) {
3089 const std::pair<llvm::Value *, llvm::Value *> DispatchBounds =
3090 CGDispatchBounds(*this, S, LoopArgs.LB, LoopArgs.UB);
3091 llvm::Value *LBVal = DispatchBounds.first;
3092 llvm::Value *UBVal = DispatchBounds.second;
3093 CGOpenMPRuntime::DispatchRTInput DispatchRTInputValues = {LBVal, UBVal,
3094 LoopArgs.Chunk};
3095 RT.emitForDispatchInit(*this, S.getBeginLoc(), ScheduleKind, IVSize,
3096 IVSigned, Ordered, DispatchRTInputValues);
3097 } else {
3098 CGOpenMPRuntime::StaticRTInput StaticInit(
3099 IVSize, IVSigned, Ordered, LoopArgs.IL, LoopArgs.LB, LoopArgs.UB,
3100 LoopArgs.ST, LoopArgs.Chunk);
3101 OpenMPDirectiveKind EKind = getEffectiveDirectiveKind(S);
3102 RT.emitForStaticInit(*this, S.getBeginLoc(), EKind, ScheduleKind,
3103 StaticInit);
3106 auto &&CodeGenOrdered = [Ordered](CodeGenFunction &CGF, SourceLocation Loc,
3107 const unsigned IVSize,
3108 const bool IVSigned) {
3109 if (Ordered) {
3110 CGF.CGM.getOpenMPRuntime().emitForOrderedIterationEnd(CGF, Loc, IVSize,
3111 IVSigned);
3115 OMPLoopArguments OuterLoopArgs(LoopArgs.LB, LoopArgs.UB, LoopArgs.ST,
3116 LoopArgs.IL, LoopArgs.Chunk, LoopArgs.EUB);
3117 OuterLoopArgs.IncExpr = S.getInc();
3118 OuterLoopArgs.Init = S.getInit();
3119 OuterLoopArgs.Cond = S.getCond();
3120 OuterLoopArgs.NextLB = S.getNextLowerBound();
3121 OuterLoopArgs.NextUB = S.getNextUpperBound();
3122 OuterLoopArgs.DKind = LoopArgs.DKind;
3123 EmitOMPOuterLoop(DynamicOrOrdered, IsMonotonic, S, LoopScope, OuterLoopArgs,
3124 emitOMPLoopBodyWithStopPoint, CodeGenOrdered);
3125 if (DynamicOrOrdered) {
3126 RT.emitForDispatchDeinit(*this, S.getBeginLoc());
3130 static void emitEmptyOrdered(CodeGenFunction &, SourceLocation Loc,
3131 const unsigned IVSize, const bool IVSigned) {}
3133 void CodeGenFunction::EmitOMPDistributeOuterLoop(
3134 OpenMPDistScheduleClauseKind ScheduleKind, const OMPLoopDirective &S,
3135 OMPPrivateScope &LoopScope, const OMPLoopArguments &LoopArgs,
3136 const CodeGenLoopTy &CodeGenLoopContent) {
3138 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
3140 // Emit outer loop.
3141 // Same behavior as an OMPForOuterLoop, except that the schedule cannot
3142 // be dynamic.
3145 const Expr *IVExpr = S.getIterationVariable();
3146 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
3147 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
3148 OpenMPDirectiveKind EKind = getEffectiveDirectiveKind(S);
3150 CGOpenMPRuntime::StaticRTInput StaticInit(
3151 IVSize, IVSigned, /* Ordered = */ false, LoopArgs.IL, LoopArgs.LB,
3152 LoopArgs.UB, LoopArgs.ST, LoopArgs.Chunk);
3153 RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind, StaticInit);
3155 // For combined 'distribute' and 'for', the increment expression of distribute
3156 // is stored in DistInc. For 'distribute' alone, it is in Inc.
3157 Expr *IncExpr;
3158 if (isOpenMPLoopBoundSharingDirective(EKind))
3159 IncExpr = S.getDistInc();
3160 else
3161 IncExpr = S.getInc();
3163 // This routine is shared by 'omp distribute parallel for' and
3164 // 'omp distribute': select the right EUB expression depending on the
3165 // directive.
3166 OMPLoopArguments OuterLoopArgs;
3167 OuterLoopArgs.LB = LoopArgs.LB;
3168 OuterLoopArgs.UB = LoopArgs.UB;
3169 OuterLoopArgs.ST = LoopArgs.ST;
3170 OuterLoopArgs.IL = LoopArgs.IL;
3171 OuterLoopArgs.Chunk = LoopArgs.Chunk;
3172 OuterLoopArgs.EUB = isOpenMPLoopBoundSharingDirective(EKind)
3173 ? S.getCombinedEnsureUpperBound()
3174 : S.getEnsureUpperBound();
3175 OuterLoopArgs.IncExpr = IncExpr;
3176 OuterLoopArgs.Init = isOpenMPLoopBoundSharingDirective(EKind)
3177 ? S.getCombinedInit()
3178 : S.getInit();
3179 OuterLoopArgs.Cond = isOpenMPLoopBoundSharingDirective(EKind)
3180 ? S.getCombinedCond()
3181 : S.getCond();
3182 OuterLoopArgs.NextLB = isOpenMPLoopBoundSharingDirective(EKind)
3183 ? S.getCombinedNextLowerBound()
3184 : S.getNextLowerBound();
3185 OuterLoopArgs.NextUB = isOpenMPLoopBoundSharingDirective(EKind)
3186 ? S.getCombinedNextUpperBound()
3187 : S.getNextUpperBound();
3188 OuterLoopArgs.DKind = OMPD_distribute;
3190 EmitOMPOuterLoop(/* DynamicOrOrdered = */ false, /* IsMonotonic = */ false, S,
3191 LoopScope, OuterLoopArgs, CodeGenLoopContent,
3192 emitEmptyOrdered);
3195 static std::pair<LValue, LValue>
3196 emitDistributeParallelForInnerBounds(CodeGenFunction &CGF,
3197 const OMPExecutableDirective &S) {
3198 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
3199 LValue LB =
3200 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
3201 LValue UB =
3202 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
3204 // When composing 'distribute' with 'for' (e.g. as in 'distribute
3205 // parallel for') we need to use the 'distribute'
3206 // chunk lower and upper bounds rather than the whole loop iteration
3207 // space. These are parameters to the outlined function for 'parallel'
3208 // and we copy the bounds of the previous schedule into
3209 // the current ones.
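// Sketch of the effect (illustrative only): under
//   #pragma omp distribute parallel for
// each team's outlined 'parallel for' receives that team's distribute chunk
// [PrevLB, PrevUB], and its worksharing then splits this chunk, rather than
// the whole [0, LastIteration] space, among the team's threads.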
3210 LValue PrevLB = CGF.EmitLValue(LS.getPrevLowerBoundVariable());
3211 LValue PrevUB = CGF.EmitLValue(LS.getPrevUpperBoundVariable());
3212 llvm::Value *PrevLBVal = CGF.EmitLoadOfScalar(
3213 PrevLB, LS.getPrevLowerBoundVariable()->getExprLoc());
3214 PrevLBVal = CGF.EmitScalarConversion(
3215 PrevLBVal, LS.getPrevLowerBoundVariable()->getType(),
3216 LS.getIterationVariable()->getType(),
3217 LS.getPrevLowerBoundVariable()->getExprLoc());
3218 llvm::Value *PrevUBVal = CGF.EmitLoadOfScalar(
3219 PrevUB, LS.getPrevUpperBoundVariable()->getExprLoc());
3220 PrevUBVal = CGF.EmitScalarConversion(
3221 PrevUBVal, LS.getPrevUpperBoundVariable()->getType(),
3222 LS.getIterationVariable()->getType(),
3223 LS.getPrevUpperBoundVariable()->getExprLoc());
3225 CGF.EmitStoreOfScalar(PrevLBVal, LB);
3226 CGF.EmitStoreOfScalar(PrevUBVal, UB);
3228 return {LB, UB};
3231 /// If the 'for' loop has a dispatch schedule (e.g. dynamic, guided), we need
3232 /// to use the LB and UB expressions generated by the worksharing code
3233 /// generation support, whereas in non-combined situations we would just
3234 /// emit 0 and the LastIteration expression.
3235 /// This function is necessary due to the difference between the LB and UB
3236 /// types expected by the RT emission routines 'for_static_init' and
3237 /// 'for_dispatch_init'.
3238 static std::pair<llvm::Value *, llvm::Value *>
3239 emitDistributeParallelForDispatchBounds(CodeGenFunction &CGF,
3240 const OMPExecutableDirective &S,
3241 Address LB, Address UB) {
3242 const OMPLoopDirective &LS = cast<OMPLoopDirective>(S);
3243 const Expr *IVExpr = LS.getIterationVariable();
3244 // When implementing a dynamic schedule for a 'for' combined with a
3245 // 'distribute' (e.g. 'distribute parallel for'), the 'for' loop
3246 // is not normalized, as each team only executes its own assigned
3247 // distribute chunk.
3248 QualType IteratorTy = IVExpr->getType();
3249 llvm::Value *LBVal =
3250 CGF.EmitLoadOfScalar(LB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
3251 llvm::Value *UBVal =
3252 CGF.EmitLoadOfScalar(UB, /*Volatile=*/false, IteratorTy, S.getBeginLoc());
3253 return {LBVal, UBVal};
3256 static void emitDistributeParallelForDistributeInnerBoundParams(
3257 CodeGenFunction &CGF, const OMPExecutableDirective &S,
3258 llvm::SmallVectorImpl<llvm::Value *> &CapturedVars) {
3259 const auto &Dir = cast<OMPLoopDirective>(S);
3260 LValue LB =
3261 CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
3262 llvm::Value *LBCast = CGF.Builder.CreateIntCast(
3263 CGF.Builder.CreateLoad(LB.getAddress()), CGF.SizeTy, /*isSigned=*/false);
3264 CapturedVars.push_back(LBCast);
3265 LValue UB =
3266 CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));
3268 llvm::Value *UBCast = CGF.Builder.CreateIntCast(
3269 CGF.Builder.CreateLoad(UB.getAddress()), CGF.SizeTy, /*isSigned=*/false);
3270 CapturedVars.push_back(UBCast);
3273 static void
3274 emitInnerParallelForWhenCombined(CodeGenFunction &CGF,
3275 const OMPLoopDirective &S,
3276 CodeGenFunction::JumpDest LoopExit) {
3277 OpenMPDirectiveKind EKind = getEffectiveDirectiveKind(S);
3278 auto &&CGInlinedWorksharingLoop = [&S, EKind](CodeGenFunction &CGF,
3279 PrePostActionTy &Action) {
3280 Action.Enter(CGF);
3281 bool HasCancel = false;
3282 if (!isOpenMPSimdDirective(EKind)) {
3283 if (const auto *D = dyn_cast<OMPTeamsDistributeParallelForDirective>(&S))
3284 HasCancel = D->hasCancel();
3285 else if (const auto *D = dyn_cast<OMPDistributeParallelForDirective>(&S))
3286 HasCancel = D->hasCancel();
3287 else if (const auto *D =
3288 dyn_cast<OMPTargetTeamsDistributeParallelForDirective>(&S))
3289 HasCancel = D->hasCancel();
3291 CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, EKind, HasCancel);
3292 CGF.EmitOMPWorksharingLoop(S, S.getPrevEnsureUpperBound(),
3293 emitDistributeParallelForInnerBounds,
3294 emitDistributeParallelForDispatchBounds);
3297 emitCommonOMPParallelDirective(
3298 CGF, S, isOpenMPSimdDirective(EKind) ? OMPD_for_simd : OMPD_for,
3299 CGInlinedWorksharingLoop,
3300 emitDistributeParallelForDistributeInnerBoundParams);
3303 void CodeGenFunction::EmitOMPDistributeParallelForDirective(
3304 const OMPDistributeParallelForDirective &S) {
3305 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3306 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
3307 S.getDistInc());
3309 OMPLexicalScope Scope(*this, S, OMPD_parallel);
3310 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
3313 void CodeGenFunction::EmitOMPDistributeParallelForSimdDirective(
3314 const OMPDistributeParallelForSimdDirective &S) {
3315 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3316 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
3317 S.getDistInc());
3319 OMPLexicalScope Scope(*this, S, OMPD_parallel);
3320 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_distribute, CodeGen);
3323 void CodeGenFunction::EmitOMPDistributeSimdDirective(
3324 const OMPDistributeSimdDirective &S) {
3325 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
3326 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
3328 OMPLexicalScope Scope(*this, S, OMPD_unknown);
3329 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
3332 void CodeGenFunction::EmitOMPTargetSimdDeviceFunction(
3333 CodeGenModule &CGM, StringRef ParentName, const OMPTargetSimdDirective &S) {
3334 // Emit SPMD target parallel for region as a standalone region.
3335 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3336 emitOMPSimdRegion(CGF, S, Action);
3338 llvm::Function *Fn;
3339 llvm::Constant *Addr;
3340 // Emit target region as a standalone region.
3341 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
3342 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
3343 assert(Fn && Addr && "Target device function emission failed.");
3346 void CodeGenFunction::EmitOMPTargetSimdDirective(
3347 const OMPTargetSimdDirective &S) {
3348 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
3349 emitOMPSimdRegion(CGF, S, Action);
3351 emitCommonOMPTargetDirective(*this, S, CodeGen);
3354 namespace {
3355 struct ScheduleKindModifiersTy {
3356 OpenMPScheduleClauseKind Kind;
3357 OpenMPScheduleClauseModifier M1;
3358 OpenMPScheduleClauseModifier M2;
3359 ScheduleKindModifiersTy(OpenMPScheduleClauseKind Kind,
3360 OpenMPScheduleClauseModifier M1,
3361 OpenMPScheduleClauseModifier M2)
3362 : Kind(Kind), M1(M1), M2(M2) {}
3364 } // namespace
3366 bool CodeGenFunction::EmitOMPWorksharingLoop(
3367 const OMPLoopDirective &S, Expr *EUB,
3368 const CodeGenLoopBoundsTy &CodeGenLoopBounds,
3369 const CodeGenDispatchBoundsTy &CGDispatchBounds) {
3370 // Emit the loop iteration variable.
3371 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
3372 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl());
3373 EmitVarDecl(*IVDecl);
3375 // Emit the iteration count variable.
3376 // If it is not a variable, Sema decided to calculate the iteration count on
3377 // each iteration (e.g., it is foldable into a constant).
3378 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
3379 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
3380 // Emit calculation of the iterations count.
3381 EmitIgnoredExpr(S.getCalcLastIteration());
3384 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
3386 bool HasLastprivateClause;
3387 // Check pre-condition.
3389 OMPLoopScope PreInitScope(*this, S);
3390 // Skip the entire loop if we don't meet the precondition.
3391 // If the condition constant folds and can be elided, avoid emitting the
3392 // whole loop.
3393 bool CondConstant;
3394 llvm::BasicBlock *ContBlock = nullptr;
3395 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
3396 if (!CondConstant)
3397 return false;
3398 } else {
3399 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then");
3400 ContBlock = createBasicBlock("omp.precond.end");
3401 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
3402 getProfileCount(&S));
3403 EmitBlock(ThenBlock);
3404 incrementProfileCounter(&S);
3407 RunCleanupsScope DoacrossCleanupScope(*this);
3408 bool Ordered = false;
3409 if (const auto *OrderedClause = S.getSingleClause<OMPOrderedClause>()) {
3410 if (OrderedClause->getNumForLoops())
3411 RT.emitDoacrossInit(*this, S, OrderedClause->getLoopNumIterations());
3412 else
3413 Ordered = true;
3416 llvm::DenseSet<const Expr *> EmittedFinals;
3417 emitAlignedClause(*this, S);
3418 bool HasLinears = EmitOMPLinearClauseInit(S);
3419 // Emit helper vars inits.
3421 std::pair<LValue, LValue> Bounds = CodeGenLoopBounds(*this, S);
3422 LValue LB = Bounds.first;
3423 LValue UB = Bounds.second;
3424 LValue ST =
3425 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
3426 LValue IL =
3427 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
3429 // Emit 'then' code.
3431 OpenMPDirectiveKind EKind = getEffectiveDirectiveKind(S);
3432 OMPPrivateScope LoopScope(*this);
3433 if (EmitOMPFirstprivateClause(S, LoopScope) || HasLinears) {
3434 // Emit implicit barrier to synchronize threads and avoid data races on
3435 // initialization of firstprivate variables and post-update of
3436 // lastprivate variables.
3437 CGM.getOpenMPRuntime().emitBarrierCall(
3438 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
3439 /*ForceSimpleCall=*/true);
3441 EmitOMPPrivateClause(S, LoopScope);
3442 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(
3443 *this, S, EmitLValue(S.getIterationVariable()));
3444 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
3445 EmitOMPReductionClauseInit(S, LoopScope);
3446 EmitOMPPrivateLoopCounters(S, LoopScope);
3447 EmitOMPLinearClause(S, LoopScope);
3448 (void)LoopScope.Privatize();
3449 if (isOpenMPTargetExecutionDirective(EKind))
3450 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S);
3452 // Detect the loop schedule kind and chunk.
3453 const Expr *ChunkExpr = nullptr;
3454 OpenMPScheduleTy ScheduleKind;
3455 if (const auto *C = S.getSingleClause<OMPScheduleClause>()) {
3456 ScheduleKind.Schedule = C->getScheduleKind();
3457 ScheduleKind.M1 = C->getFirstScheduleModifier();
3458 ScheduleKind.M2 = C->getSecondScheduleModifier();
3459 ChunkExpr = C->getChunkSize();
3460 } else {
3461 // Default behaviour for schedule clause.
3462 CGM.getOpenMPRuntime().getDefaultScheduleAndChunk(
3463 *this, S, ScheduleKind.Schedule, ChunkExpr);
3465 bool HasChunkSizeOne = false;
3466 llvm::Value *Chunk = nullptr;
3467 if (ChunkExpr) {
3468 Chunk = EmitScalarExpr(ChunkExpr);
3469 Chunk = EmitScalarConversion(Chunk, ChunkExpr->getType(),
3470 S.getIterationVariable()->getType(),
3471 S.getBeginLoc());
3472 Expr::EvalResult Result;
3473 if (ChunkExpr->EvaluateAsInt(Result, getContext())) {
3474 llvm::APSInt EvaluatedChunk = Result.Val.getInt();
3475 HasChunkSizeOne = (EvaluatedChunk.getLimitedValue() == 1);
3478 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
3479 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
3480 // OpenMP 4.5, 2.7.1 Loop Construct, Description.
3481 // If the static schedule kind is specified or if the ordered clause is
3482 // specified, and if no monotonic modifier is specified, the effect will
3483 // be as if the monotonic modifier was specified.
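// For instance (illustrative): 'schedule(static)' with no modifier and any
// schedule combined with an 'ordered' clause are treated as monotonic here,
// while 'schedule(nonmonotonic: dynamic)' is not.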
3484 bool StaticChunkedOne =
3485 RT.isStaticChunked(ScheduleKind.Schedule,
3486 /* Chunked */ Chunk != nullptr) &&
3487 HasChunkSizeOne && isOpenMPLoopBoundSharingDirective(EKind);
3488 bool IsMonotonic =
3489 Ordered ||
3490 (ScheduleKind.Schedule == OMPC_SCHEDULE_static &&
3491 !(ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_nonmonotonic ||
3492 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_nonmonotonic)) ||
3493 ScheduleKind.M1 == OMPC_SCHEDULE_MODIFIER_monotonic ||
3494 ScheduleKind.M2 == OMPC_SCHEDULE_MODIFIER_monotonic;
3495 if ((RT.isStaticNonchunked(ScheduleKind.Schedule,
3496 /* Chunked */ Chunk != nullptr) ||
3497 StaticChunkedOne) &&
3498 !Ordered) {
3499 JumpDest LoopExit =
3500 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
3501 emitCommonSimdLoop(
3502 *this, S,
3503 [&S, EKind](CodeGenFunction &CGF, PrePostActionTy &) {
3504 if (isOpenMPSimdDirective(EKind)) {
3505 CGF.EmitOMPSimdInit(S);
3506 } else if (const auto *C = S.getSingleClause<OMPOrderClause>()) {
3507 if (C->getKind() == OMPC_ORDER_concurrent)
3508 CGF.LoopStack.setParallel(/*Enable=*/true);
3511 [IVSize, IVSigned, Ordered, IL, LB, UB, ST, StaticChunkedOne, Chunk,
3512 &S, ScheduleKind, LoopExit, EKind,
3513 &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
3514 // OpenMP [2.7.1, Loop Construct, Description, table 2-1]
3515 // When no chunk_size is specified, the iteration space is divided
3516 // into chunks that are approximately equal in size, and at most
3517 // one chunk is distributed to each thread. Note that the size of
3518 // the chunks is unspecified in this case.
3519 CGOpenMPRuntime::StaticRTInput StaticInit(
3520 IVSize, IVSigned, Ordered, IL.getAddress(), LB.getAddress(),
3521 UB.getAddress(), ST.getAddress(),
3522 StaticChunkedOne ? Chunk : nullptr);
3523 CGF.CGM.getOpenMPRuntime().emitForStaticInit(
3524 CGF, S.getBeginLoc(), EKind, ScheduleKind, StaticInit);
3525 // UB = min(UB, GlobalUB);
3526 if (!StaticChunkedOne)
3527 CGF.EmitIgnoredExpr(S.getEnsureUpperBound());
3528 // IV = LB;
3529 CGF.EmitIgnoredExpr(S.getInit());
3530 // For unchunked static schedule generate:
3532 // while (idx <= UB) {
3533 // BODY;
3534 // ++idx;
3535 // }
3537 // For static schedule with chunk one:
3539 // while (IV <= PrevUB) {
3540 // BODY;
3541 // IV += ST;
3542 // }
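// E.g. (illustrative) '#pragma omp distribute parallel for schedule(static, 1)'
// takes the chunk-one form: the condition compares IV against the enclosing
// distribute chunk's PrevUB instead of a per-chunk UB, and the body advances
// IV by the stride ST.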
3543 CGF.EmitOMPInnerLoop(
3544 S, LoopScope.requiresCleanups(),
3545 StaticChunkedOne ? S.getCombinedParForInDistCond()
3546 : S.getCond(),
3547 StaticChunkedOne ? S.getDistInc() : S.getInc(),
3548 [&S, LoopExit](CodeGenFunction &CGF) {
3549 emitOMPLoopBodyWithStopPoint(CGF, S, LoopExit);
3551 [](CodeGenFunction &) {});
3553 EmitBlock(LoopExit.getBlock());
3554 // Tell the runtime we are done.
3555 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
3556 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
3557 OMPD_for);
3559 OMPCancelStack.emitExit(*this, EKind, CodeGen);
3560 } else {
3561 // Emit the outer loop, which requests its work chunk [LB..UB] from the
3562 // runtime and runs the inner loop to process it.
3563 OMPLoopArguments LoopArguments(LB.getAddress(), UB.getAddress(),
3564 ST.getAddress(), IL.getAddress(), Chunk,
3565 EUB);
3566 LoopArguments.DKind = OMPD_for;
3567 EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
3568 LoopArguments, CGDispatchBounds);
3570 if (isOpenMPSimdDirective(EKind)) {
3571 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) {
3572 return CGF.Builder.CreateIsNotNull(
3573 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
3576 EmitOMPReductionClauseFinal(
3577 S, /*ReductionKind=*/isOpenMPSimdDirective(EKind)
3578 ? /*Parallel and Simd*/ OMPD_parallel_for_simd
3579 : /*Parallel only*/ OMPD_parallel);
3580 // Emit post-update of the reduction variables if IsLastIter != 0.
3581 emitPostUpdateForReductionClause(
3582 *this, S, [IL, &S](CodeGenFunction &CGF) {
3583 return CGF.Builder.CreateIsNotNull(
3584 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
3586 // Emit final copy of the lastprivate variables if IsLastIter != 0.
3587 if (HasLastprivateClause)
3588 EmitOMPLastprivateClauseFinal(
3589 S, isOpenMPSimdDirective(EKind),
3590 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc())));
3591 LoopScope.restoreMap();
3592 EmitOMPLinearClauseFinal(S, [IL, &S](CodeGenFunction &CGF) {
3593 return CGF.Builder.CreateIsNotNull(
3594 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
3597 DoacrossCleanupScope.ForceCleanup();
3598 // We're now done with the loop, so jump to the continuation block.
3599 if (ContBlock) {
3600 EmitBranch(ContBlock);
3601 EmitBlock(ContBlock, /*IsFinished=*/true);
3604 return HasLastprivateClause;
3607 /// The following two functions generate expressions for the loop lower
3608 /// and upper bounds in case of static and dynamic (dispatch) schedule
3609 /// of the associated 'for' or 'distribute' loop.
3610 static std::pair<LValue, LValue>
3611 emitForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
3612 const auto &LS = cast<OMPLoopDirective>(S);
3613 LValue LB =
3614 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getLowerBoundVariable()));
3615 LValue UB =
3616 EmitOMPHelperVar(CGF, cast<DeclRefExpr>(LS.getUpperBoundVariable()));
3617 return {LB, UB};
3620 /// When dealing with dispatch schedules (e.g. dynamic, guided), we do not
3621 /// use the lower and upper bound expressions generated by the worksharing
3622 /// loop support; instead we use 0 and the iteration space size as
3623 /// constants.
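/// (Illustrative note, not from the original comment: the dispatch path thus
/// always initializes the runtime with the whole normalized iteration space
/// starting at 0, and the actual per-chunk bounds come back from
/// __kmpc_dispatch_next at run time.)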
3624 static std::pair<llvm::Value *, llvm::Value *>
3625 emitDispatchForLoopBounds(CodeGenFunction &CGF, const OMPExecutableDirective &S,
3626 Address LB, Address UB) {
3627 const auto &LS = cast<OMPLoopDirective>(S);
3628 const Expr *IVExpr = LS.getIterationVariable();
3629 const unsigned IVSize = CGF.getContext().getTypeSize(IVExpr->getType());
3630 llvm::Value *LBVal = CGF.Builder.getIntN(IVSize, 0);
3631 llvm::Value *UBVal = CGF.EmitScalarExpr(LS.getLastIteration());
3632 return {LBVal, UBVal};
3635 /// Emits internal temp array declarations for the directive with inscan
3636 /// reductions.
3637 /// The code is the following:
3638 /// \code
3639 /// size num_iters = <num_iters>;
3640 /// <type> buffer[num_iters];
3641 /// \endcode
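/// For example (illustrative, not from the source): with
///   #pragma omp for reduction(inscan, +: sum)
/// over N iterations, this emits an N-element temporary buffer for 'sum'
/// that the input and scan phases below fill and combine.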
3642 static void emitScanBasedDirectiveDecls(
3643 CodeGenFunction &CGF, const OMPLoopDirective &S,
3644 llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen) {
3645 llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
3646 NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
3647 SmallVector<const Expr *, 4> Shareds;
3648 SmallVector<const Expr *, 4> Privates;
3649 SmallVector<const Expr *, 4> ReductionOps;
3650 SmallVector<const Expr *, 4> CopyArrayTemps;
3651 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
3652 assert(C->getModifier() == OMPC_REDUCTION_inscan &&
3653 "Only inscan reductions are expected.");
3654 Shareds.append(C->varlist_begin(), C->varlist_end());
3655 Privates.append(C->privates().begin(), C->privates().end());
3656 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
3657 CopyArrayTemps.append(C->copy_array_temps().begin(),
3658 C->copy_array_temps().end());
3661 // Emit buffers for each reduction variable.
3662 // ReductionCodeGen is required to correctly emit the code for array
3663 // reductions.
3664 ReductionCodeGen RedCG(Shareds, Shareds, Privates, ReductionOps);
3665 unsigned Count = 0;
3666 auto *ITA = CopyArrayTemps.begin();
3667 for (const Expr *IRef : Privates) {
3668 const auto *PrivateVD = cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
3669 // Emit variably modified arrays, used for arrays/array sections
3670 // reductions.
3671 if (PrivateVD->getType()->isVariablyModifiedType()) {
3672 RedCG.emitSharedOrigLValue(CGF, Count);
3673 RedCG.emitAggregateType(CGF, Count);
3675 CodeGenFunction::OpaqueValueMapping DimMapping(
3676 CGF,
3677 cast<OpaqueValueExpr>(
3678 cast<VariableArrayType>((*ITA)->getType()->getAsArrayTypeUnsafe())
3679 ->getSizeExpr()),
3680 RValue::get(OMPScanNumIterations));
3681 // Emit temp buffer.
3682 CGF.EmitVarDecl(*cast<VarDecl>(cast<DeclRefExpr>(*ITA)->getDecl()));
3683 ++ITA;
3684 ++Count;
3689 /// Copies final inscan reduction values to the original variables.
3690 /// The code is the following:
3691 /// \code
3692 /// <orig_var> = buffer[num_iters-1];
3693 /// \endcode
3694 static void emitScanBasedDirectiveFinals(
3695 CodeGenFunction &CGF, const OMPLoopDirective &S,
3696 llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen) {
3697 llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
3698 NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
3699 SmallVector<const Expr *, 4> Shareds;
3700 SmallVector<const Expr *, 4> LHSs;
3701 SmallVector<const Expr *, 4> RHSs;
3702 SmallVector<const Expr *, 4> Privates;
3703 SmallVector<const Expr *, 4> CopyOps;
3704 SmallVector<const Expr *, 4> CopyArrayElems;
3705 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
3706 assert(C->getModifier() == OMPC_REDUCTION_inscan &&
3707 "Only inscan reductions are expected.");
3708 Shareds.append(C->varlist_begin(), C->varlist_end());
3709 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
3710 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
3711 Privates.append(C->privates().begin(), C->privates().end());
3712 CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
3713 CopyArrayElems.append(C->copy_array_elems().begin(),
3714 C->copy_array_elems().end());
3716 // Copy the final value from the temp buffer to the original variable:
3717 // LHS = TMP[LastIter];
3718 llvm::Value *OMPLast = CGF.Builder.CreateNSWSub(
3719 OMPScanNumIterations,
3720 llvm::ConstantInt::get(CGF.SizeTy, 1, /*isSigned=*/false));
3721 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
3722 const Expr *PrivateExpr = Privates[I];
3723 const Expr *OrigExpr = Shareds[I];
3724 const Expr *CopyArrayElem = CopyArrayElems[I];
3725 CodeGenFunction::OpaqueValueMapping IdxMapping(
3726 CGF,
3727 cast<OpaqueValueExpr>(
3728 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
3729 RValue::get(OMPLast));
3730 LValue DestLVal = CGF.EmitLValue(OrigExpr);
3731 LValue SrcLVal = CGF.EmitLValue(CopyArrayElem);
3732 CGF.EmitOMPCopy(
3733 PrivateExpr->getType(), DestLVal.getAddress(), SrcLVal.getAddress(),
3734 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
3735 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), CopyOps[I]);
3739 /// Emits the code for the directive with inscan reductions.
3740 /// The code is the following:
3741 /// \code
3742 /// #pragma omp ...
3743 /// for (i: 0..<num_iters>) {
3744 /// <input phase>;
3745 /// buffer[i] = red;
3746 /// }
3747 /// #pragma omp master // in parallel region
3748 /// for (int k = 0; k != ceil(log2(num_iters)); ++k)
3749 /// for (size cnt = last_iter; cnt >= pow(2, k); --cnt)
3750 /// buffer[cnt] op= buffer[cnt-pow(2,k)];
3751 /// #pragma omp barrier // in parallel region
3752 /// #pragma omp ...
3753 /// for (0..<num_iters>) {
3754 /// red = InclusiveScan ? buffer[i] : buffer[i-1];
3755 /// <scan phase>;
3756 /// }
3757 /// \endcode
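/// Worked example (illustrative): with '+' and num_iters = 4, the input phase
/// leaves buffer = {a0, a1, a2, a3}. Pass k=0 (pow2k = 1) updates slots 3..1,
/// giving {a0, a0+a1, a1+a2, a2+a3}; pass k=1 (pow2k = 2) updates slots 3..2,
/// giving the inclusive prefix sums {a0, a0+a1, a0+a1+a2, a0+a1+a2+a3} after
/// ceil(log2(4)) = 2 passes.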
3758 static void emitScanBasedDirective(
3759 CodeGenFunction &CGF, const OMPLoopDirective &S,
3760 llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen,
3761 llvm::function_ref<void(CodeGenFunction &)> FirstGen,
3762 llvm::function_ref<void(CodeGenFunction &)> SecondGen) {
3763 llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
3764 NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
3765 SmallVector<const Expr *, 4> Privates;
3766 SmallVector<const Expr *, 4> ReductionOps;
3767 SmallVector<const Expr *, 4> LHSs;
3768 SmallVector<const Expr *, 4> RHSs;
3769 SmallVector<const Expr *, 4> CopyArrayElems;
3770 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
3771 assert(C->getModifier() == OMPC_REDUCTION_inscan &&
3772 "Only inscan reductions are expected.");
3773 Privates.append(C->privates().begin(), C->privates().end());
3774 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
3775 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
3776 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
3777 CopyArrayElems.append(C->copy_array_elems().begin(),
3778 C->copy_array_elems().end());
3780 CodeGenFunction::ParentLoopDirectiveForScanRegion ScanRegion(CGF, S);
3782 // Emit loop with input phase:
3783 // #pragma omp ...
3784 // for (i: 0..<num_iters>) {
3785 // <input phase>;
3786 // buffer[i] = red;
3787 // }
3788 CGF.OMPFirstScanLoop = true;
3789 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
3790 FirstGen(CGF);
3792 // #pragma omp barrier // in parallel region
3793 auto &&CodeGen = [&S, OMPScanNumIterations, &LHSs, &RHSs, &CopyArrayElems,
3794 &ReductionOps,
3795 &Privates](CodeGenFunction &CGF, PrePostActionTy &Action) {
3796 Action.Enter(CGF);
3797 // Emit prefix reduction:
3798 // #pragma omp master // in parallel region
3799 // for (int k = 0; k <= ceil(log2(n)); ++k)
3800 llvm::BasicBlock *InputBB = CGF.Builder.GetInsertBlock();
3801 llvm::BasicBlock *LoopBB = CGF.createBasicBlock("omp.outer.log.scan.body");
3802 llvm::BasicBlock *ExitBB = CGF.createBasicBlock("omp.outer.log.scan.exit");
3803 llvm::Function *F =
3804 CGF.CGM.getIntrinsic(llvm::Intrinsic::log2, CGF.DoubleTy);
3805 llvm::Value *Arg =
3806 CGF.Builder.CreateUIToFP(OMPScanNumIterations, CGF.DoubleTy);
3807 llvm::Value *LogVal = CGF.EmitNounwindRuntimeCall(F, Arg);
3808 F = CGF.CGM.getIntrinsic(llvm::Intrinsic::ceil, CGF.DoubleTy);
3809 LogVal = CGF.EmitNounwindRuntimeCall(F, LogVal);
3810 LogVal = CGF.Builder.CreateFPToUI(LogVal, CGF.IntTy);
3811 llvm::Value *NMin1 = CGF.Builder.CreateNUWSub(
3812 OMPScanNumIterations, llvm::ConstantInt::get(CGF.SizeTy, 1));
3813 auto DL = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getBeginLoc());
3814 CGF.EmitBlock(LoopBB);
3815 auto *Counter = CGF.Builder.CreatePHI(CGF.IntTy, 2);
3816 // size pow2k = 1;
3817 auto *Pow2K = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
3818 Counter->addIncoming(llvm::ConstantInt::get(CGF.IntTy, 0), InputBB);
3819 Pow2K->addIncoming(llvm::ConstantInt::get(CGF.SizeTy, 1), InputBB);
3820 // for (size i = n - 1; i >= 2 ^ k; --i)
3821 // tmp[i] op= tmp[i-pow2k];
3822 llvm::BasicBlock *InnerLoopBB =
3823 CGF.createBasicBlock("omp.inner.log.scan.body");
3824 llvm::BasicBlock *InnerExitBB =
3825 CGF.createBasicBlock("omp.inner.log.scan.exit");
3826 llvm::Value *CmpI = CGF.Builder.CreateICmpUGE(NMin1, Pow2K);
3827 CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
3828 CGF.EmitBlock(InnerLoopBB);
3829 auto *IVal = CGF.Builder.CreatePHI(CGF.SizeTy, 2);
3830 IVal->addIncoming(NMin1, LoopBB);
3832 CodeGenFunction::OMPPrivateScope PrivScope(CGF);
3833 auto *ILHS = LHSs.begin();
3834 auto *IRHS = RHSs.begin();
3835 for (const Expr *CopyArrayElem : CopyArrayElems) {
3836 const auto *LHSVD = cast<VarDecl>(cast<DeclRefExpr>(*ILHS)->getDecl());
3837 const auto *RHSVD = cast<VarDecl>(cast<DeclRefExpr>(*IRHS)->getDecl());
3838 Address LHSAddr = Address::invalid();
3840 CodeGenFunction::OpaqueValueMapping IdxMapping(
3841 CGF,
3842 cast<OpaqueValueExpr>(
3843 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
3844 RValue::get(IVal));
3845 LHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress();
3847 PrivScope.addPrivate(LHSVD, LHSAddr);
3848 Address RHSAddr = Address::invalid();
3850 llvm::Value *OffsetIVal = CGF.Builder.CreateNUWSub(IVal, Pow2K);
3851 CodeGenFunction::OpaqueValueMapping IdxMapping(
3852 CGF,
3853 cast<OpaqueValueExpr>(
3854 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
3855 RValue::get(OffsetIVal));
3856 RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress();
3858 PrivScope.addPrivate(RHSVD, RHSAddr);
3859 ++ILHS;
3860 ++IRHS;
3862 PrivScope.Privatize();
3863 CGF.CGM.getOpenMPRuntime().emitReduction(
3864 CGF, S.getEndLoc(), Privates, LHSs, RHSs, ReductionOps,
3865 {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_unknown});
3867 llvm::Value *NextIVal =
3868 CGF.Builder.CreateNUWSub(IVal, llvm::ConstantInt::get(CGF.SizeTy, 1));
3869 IVal->addIncoming(NextIVal, CGF.Builder.GetInsertBlock());
3870 CmpI = CGF.Builder.CreateICmpUGE(NextIVal, Pow2K);
3871 CGF.Builder.CreateCondBr(CmpI, InnerLoopBB, InnerExitBB);
3872 CGF.EmitBlock(InnerExitBB);
3873 llvm::Value *Next =
3874 CGF.Builder.CreateNUWAdd(Counter, llvm::ConstantInt::get(CGF.IntTy, 1));
3875 Counter->addIncoming(Next, CGF.Builder.GetInsertBlock());
3876 // pow2k <<= 1;
3877 llvm::Value *NextPow2K =
3878 CGF.Builder.CreateShl(Pow2K, 1, "", /*HasNUW=*/true);
3879 Pow2K->addIncoming(NextPow2K, CGF.Builder.GetInsertBlock());
3880 llvm::Value *Cmp = CGF.Builder.CreateICmpNE(Next, LogVal);
3881 CGF.Builder.CreateCondBr(Cmp, LoopBB, ExitBB);
3882 auto DL1 = ApplyDebugLocation::CreateDefaultArtificial(CGF, S.getEndLoc());
3883 CGF.EmitBlock(ExitBB);
3885 OpenMPDirectiveKind EKind = getEffectiveDirectiveKind(S);
3886 if (isOpenMPParallelDirective(EKind)) {
3887 CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc());
3888 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
3889 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
3890 /*ForceSimpleCall=*/true);
3891 } else {
3892 RegionCodeGenTy RCG(CodeGen);
3893 RCG(CGF);
3896 CGF.OMPFirstScanLoop = false;
3897 SecondGen(CGF);
3900 static bool emitWorksharingDirective(CodeGenFunction &CGF,
3901 const OMPLoopDirective &S,
3902 bool HasCancel) {
3903 bool HasLastprivates;
3904 OpenMPDirectiveKind EKind = getEffectiveDirectiveKind(S);
3905 if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
3906 [](const OMPReductionClause *C) {
3907 return C->getModifier() == OMPC_REDUCTION_inscan;
3908 })) {
3909 const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
3910 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
3911 OMPLoopScope LoopScope(CGF, S);
3912 return CGF.EmitScalarExpr(S.getNumIterations());
3914 const auto &&FirstGen = [&S, HasCancel, EKind](CodeGenFunction &CGF) {
3915 CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, EKind, HasCancel);
3916 (void)CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
3917 emitForLoopBounds,
3918 emitDispatchForLoopBounds);
3919 // Emit an implicit barrier at the end.
3920 CGF.CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getBeginLoc(),
3921 OMPD_for);
3923 const auto &&SecondGen = [&S, HasCancel, EKind,
3924 &HasLastprivates](CodeGenFunction &CGF) {
3925 CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, EKind, HasCancel);
3926 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
3927 emitForLoopBounds,
3928 emitDispatchForLoopBounds);
3930 if (!isOpenMPParallelDirective(EKind))
3931 emitScanBasedDirectiveDecls(CGF, S, NumIteratorsGen);
3932 emitScanBasedDirective(CGF, S, NumIteratorsGen, FirstGen, SecondGen);
3933 if (!isOpenMPParallelDirective(EKind))
3934 emitScanBasedDirectiveFinals(CGF, S, NumIteratorsGen);
3935 } else {
3936 CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, EKind, HasCancel);
3937 HasLastprivates = CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(),
3938 emitForLoopBounds,
3939 emitDispatchForLoopBounds);
3941 return HasLastprivates;
3944 // Pass OMPLoopDirective (instead of OMPForDirective) to make this check
3945 // available for "loop bind(parallel)", which maps to "for".
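// E.g. (illustrative): '#pragma omp for nowait schedule(static, 4)' passes
// this check, while a 'for' with a cancellation point, a schedule modifier,
// or any clause other than 'nowait'/'bind'/'schedule' does not.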
3946 static bool isForSupportedByOpenMPIRBuilder(const OMPLoopDirective &S,
3947 bool HasCancel) {
3948 if (HasCancel)
3949 return false;
3950 for (OMPClause *C : S.clauses()) {
3951 if (isa<OMPNowaitClause, OMPBindClause>(C))
3952 continue;
3954 if (auto *SC = dyn_cast<OMPScheduleClause>(C)) {
3955 if (SC->getFirstScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown)
3956 return false;
3957 if (SC->getSecondScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown)
3958 return false;
3959 switch (SC->getScheduleKind()) {
3960 case OMPC_SCHEDULE_auto:
3961 case OMPC_SCHEDULE_dynamic:
3962 case OMPC_SCHEDULE_runtime:
3963 case OMPC_SCHEDULE_guided:
3964 case OMPC_SCHEDULE_static:
3965 continue;
3966 case OMPC_SCHEDULE_unknown:
3967 return false;
3971 return false;
3974 return true;
3977 static llvm::omp::ScheduleKind
3978 convertClauseKindToSchedKind(OpenMPScheduleClauseKind ScheduleClauseKind) {
3979 switch (ScheduleClauseKind) {
3980 case OMPC_SCHEDULE_unknown:
3981 return llvm::omp::OMP_SCHEDULE_Default;
3982 case OMPC_SCHEDULE_auto:
3983 return llvm::omp::OMP_SCHEDULE_Auto;
3984 case OMPC_SCHEDULE_dynamic:
3985 return llvm::omp::OMP_SCHEDULE_Dynamic;
3986 case OMPC_SCHEDULE_guided:
3987 return llvm::omp::OMP_SCHEDULE_Guided;
3988 case OMPC_SCHEDULE_runtime:
3989 return llvm::omp::OMP_SCHEDULE_Runtime;
3990 case OMPC_SCHEDULE_static:
3991 return llvm::omp::OMP_SCHEDULE_Static;
3993 llvm_unreachable("Unhandled schedule kind");
3996 // Pass OMPLoopDirective (instead of OMPForDirective) to make this function
3997 // available for "loop bind(parallel)", which maps to "for".
3998 static void emitOMPForDirective(const OMPLoopDirective &S, CodeGenFunction &CGF,
3999 CodeGenModule &CGM, bool HasCancel) {
4000 bool HasLastprivates = false;
4001 bool UseOMPIRBuilder = CGM.getLangOpts().OpenMPIRBuilder &&
4002 isForSupportedByOpenMPIRBuilder(S, HasCancel);
4003 auto &&CodeGen = [&S, &CGM, HasCancel, &HasLastprivates,
4004 UseOMPIRBuilder](CodeGenFunction &CGF, PrePostActionTy &) {
4005 // Use the OpenMPIRBuilder if enabled.
4006 if (UseOMPIRBuilder) {
4007 bool NeedsBarrier = !S.getSingleClause<OMPNowaitClause>();
4009 llvm::omp::ScheduleKind SchedKind = llvm::omp::OMP_SCHEDULE_Default;
4010 llvm::Value *ChunkSize = nullptr;
4011 if (auto *SchedClause = S.getSingleClause<OMPScheduleClause>()) {
4012 SchedKind =
4013 convertClauseKindToSchedKind(SchedClause->getScheduleKind());
4014 if (const Expr *ChunkSizeExpr = SchedClause->getChunkSize())
4015 ChunkSize = CGF.EmitScalarExpr(ChunkSizeExpr);
4018 // Emit the associated statement and get its loop representation.
4019 const Stmt *Inner = S.getRawStmt();
4020 llvm::CanonicalLoopInfo *CLI =
4021 CGF.EmitOMPCollapsedCanonicalLoopNest(Inner, 1);
4023 llvm::OpenMPIRBuilder &OMPBuilder =
4024 CGM.getOpenMPRuntime().getOMPBuilder();
4025 llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
4026 CGF.AllocaInsertPt->getParent(), CGF.AllocaInsertPt->getIterator());
4027 llvm::OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
4028 OMPBuilder.applyWorkshareLoop(
4029 CGF.Builder.getCurrentDebugLocation(), CLI, AllocaIP,
4030 NeedsBarrier, SchedKind, ChunkSize, /*HasSimdModifier=*/false,
4031 /*HasMonotonicModifier=*/false, /*HasNonmonotonicModifier=*/false,
4032 /*HasOrderedClause=*/false);
4033 assert(AfterIP && "unexpected error creating workshare loop");
4034 return;
4037 HasLastprivates = emitWorksharingDirective(CGF, S, HasCancel);
4040 auto LPCRegion =
4041 CGOpenMPRuntime::LastprivateConditionalRAII::disable(CGF, S);
4042 OMPLexicalScope Scope(CGF, S, OMPD_unknown);
4043 CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_for, CodeGen,
4044 HasCancel);
4047 if (!UseOMPIRBuilder) {
4048 // Emit an implicit barrier at the end.
4049 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
4050 CGM.getOpenMPRuntime().emitBarrierCall(CGF, S.getBeginLoc(), OMPD_for);
4052 // Check for outer lastprivate conditional update.
4053 checkForLastprivateConditionalUpdate(CGF, S);
4056 void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
4057 return emitOMPForDirective(S, *this, CGM, S.hasCancel());
4060 void CodeGenFunction::EmitOMPForSimdDirective(const OMPForSimdDirective &S) {
4061 bool HasLastprivates = false;
4062 auto &&CodeGen = [&S, &HasLastprivates](CodeGenFunction &CGF,
4063 PrePostActionTy &) {
4064 HasLastprivates = emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
4067 auto LPCRegion =
4068 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4069 OMPLexicalScope Scope(*this, S, OMPD_unknown);
4070 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_simd, CodeGen);
4073 // Emit an implicit barrier at the end.
4074 if (!S.getSingleClause<OMPNowaitClause>() || HasLastprivates)
4075 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_for);
4076 // Check for outer lastprivate conditional update.
4077 checkForLastprivateConditionalUpdate(*this, S);
4080 static LValue createSectionLVal(CodeGenFunction &CGF, QualType Ty,
4081 const Twine &Name,
4082 llvm::Value *Init = nullptr) {
4083 LValue LVal = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty, Name), Ty);
4084 if (Init)
4085 CGF.EmitStoreThroughLValue(RValue::get(Init), LVal, /*isInit*/ true);
4086 return LVal;
4089 void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
4090 const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt();
4091 const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt);
4092 bool HasLastprivates = false;
4093 OpenMPDirectiveKind EKind = getEffectiveDirectiveKind(S);
4094 auto &&CodeGen = [&S, CapturedStmt, CS, EKind,
4095 &HasLastprivates](CodeGenFunction &CGF, PrePostActionTy &) {
4096 const ASTContext &C = CGF.getContext();
4097 QualType KmpInt32Ty =
4098 C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1);
4099 // Emit helper vars inits.
4100 LValue LB = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.lb.",
4101 CGF.Builder.getInt32(0));
4102 llvm::ConstantInt *GlobalUBVal = CS != nullptr
4103 ? CGF.Builder.getInt32(CS->size() - 1)
4104 : CGF.Builder.getInt32(0);
4105 LValue UB =
4106 createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.ub.", GlobalUBVal);
4107 LValue ST = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.st.",
4108 CGF.Builder.getInt32(1));
4109 LValue IL = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.il.",
4110 CGF.Builder.getInt32(0));
4111 // Loop counter.
4112 LValue IV = createSectionLVal(CGF, KmpInt32Ty, ".omp.sections.iv.");
4113 OpaqueValueExpr IVRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
4114 CodeGenFunction::OpaqueValueMapping OpaqueIV(CGF, &IVRefExpr, IV);
4115 OpaqueValueExpr UBRefExpr(S.getBeginLoc(), KmpInt32Ty, VK_LValue);
4116 CodeGenFunction::OpaqueValueMapping OpaqueUB(CGF, &UBRefExpr, UB);
4117 // Generate condition for loop.
4118 BinaryOperator *Cond = BinaryOperator::Create(
4119 C, &IVRefExpr, &UBRefExpr, BO_LE, C.BoolTy, VK_PRValue, OK_Ordinary,
4120 S.getBeginLoc(), FPOptionsOverride());
4121 // Increment for loop counter.
4122 UnaryOperator *Inc = UnaryOperator::Create(
4123 C, &IVRefExpr, UO_PreInc, KmpInt32Ty, VK_PRValue, OK_Ordinary,
4124 S.getBeginLoc(), true, FPOptionsOverride());
4125 auto &&BodyGen = [CapturedStmt, CS, &S, &IV](CodeGenFunction &CGF) {
4126 // Iterate through all sections and emit a switch construct:
4127 // switch (IV) {
4128 // case 0:
4129 // <SectionStmt[0]>;
4130 // break;
4131 // ...
4132 // case <NumSection> - 1:
4133 // <SectionStmt[<NumSection> - 1]>;
4134 // break;
4135 // }
4136 // .omp.sections.exit:
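// Illustrative sketch (assumption): '#pragma omp sections' containing three
// '#pragma omp section' blocks becomes a static worksharing loop over
// IV = 0..2 whose body is this switch, so each thread runs exactly the cases
// that fall into its assigned chunk.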
4137 llvm::BasicBlock *ExitBB = CGF.createBasicBlock(".omp.sections.exit");
4138 llvm::SwitchInst *SwitchStmt =
4139 CGF.Builder.CreateSwitch(CGF.EmitLoadOfScalar(IV, S.getBeginLoc()),
4140 ExitBB, CS == nullptr ? 1 : CS->size());
4141 if (CS) {
4142 unsigned CaseNumber = 0;
4143 for (const Stmt *SubStmt : CS->children()) {
4144 auto CaseBB = CGF.createBasicBlock(".omp.sections.case");
4145 CGF.EmitBlock(CaseBB);
4146 SwitchStmt->addCase(CGF.Builder.getInt32(CaseNumber), CaseBB);
4147 CGF.EmitStmt(SubStmt);
4148 CGF.EmitBranch(ExitBB);
4149 ++CaseNumber;
4151 } else {
4152 llvm::BasicBlock *CaseBB = CGF.createBasicBlock(".omp.sections.case");
4153 CGF.EmitBlock(CaseBB);
4154 SwitchStmt->addCase(CGF.Builder.getInt32(0), CaseBB);
4155 CGF.EmitStmt(CapturedStmt);
4156 CGF.EmitBranch(ExitBB);
4158 CGF.EmitBlock(ExitBB, /*IsFinished=*/true);
4161 CodeGenFunction::OMPPrivateScope LoopScope(CGF);
4162 if (CGF.EmitOMPFirstprivateClause(S, LoopScope)) {
4163 // Emit implicit barrier to synchronize threads and avoid data races on
4164 // initialization of firstprivate variables and post-update of lastprivate
4165 // variables.
4166 CGF.CGM.getOpenMPRuntime().emitBarrierCall(
4167 CGF, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
4168 /*ForceSimpleCall=*/true);
4170 CGF.EmitOMPPrivateClause(S, LoopScope);
4171 CGOpenMPRuntime::LastprivateConditionalRAII LPCRegion(CGF, S, IV);
4172 HasLastprivates = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
4173 CGF.EmitOMPReductionClauseInit(S, LoopScope);
4174 (void)LoopScope.Privatize();
4175 if (isOpenMPTargetExecutionDirective(EKind))
4176 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
4178 // Emit static non-chunked loop.
4179 OpenMPScheduleTy ScheduleKind;
4180 ScheduleKind.Schedule = OMPC_SCHEDULE_static;
4181 CGOpenMPRuntime::StaticRTInput StaticInit(
4182 /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(),
4183 LB.getAddress(), UB.getAddress(), ST.getAddress());
4184 CGF.CGM.getOpenMPRuntime().emitForStaticInit(CGF, S.getBeginLoc(), EKind,
4185 ScheduleKind, StaticInit);
4186 // UB = min(UB, GlobalUB);
4187 llvm::Value *UBVal = CGF.EmitLoadOfScalar(UB, S.getBeginLoc());
4188 llvm::Value *MinUBGlobalUB = CGF.Builder.CreateSelect(
4189 CGF.Builder.CreateICmpSLT(UBVal, GlobalUBVal), UBVal, GlobalUBVal);
4190 CGF.EmitStoreOfScalar(MinUBGlobalUB, UB);
4191 // IV = LB;
4192 CGF.EmitStoreOfScalar(CGF.EmitLoadOfScalar(LB, S.getBeginLoc()), IV);
4193 // while (idx <= UB) { BODY; ++idx; }
4194 CGF.EmitOMPInnerLoop(S, /*RequiresCleanup=*/false, Cond, Inc, BodyGen,
4195 [](CodeGenFunction &) {});
4196 // Tell the runtime we are done.
4197 auto &&CodeGen = [&S](CodeGenFunction &CGF) {
4198 CGF.CGM.getOpenMPRuntime().emitForStaticFinish(CGF, S.getEndLoc(),
4199 OMPD_sections);
4201 CGF.OMPCancelStack.emitExit(CGF, EKind, CodeGen);
4202 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
4203 // Emit post-update of the reduction variables if IsLastIter != 0.
4204 emitPostUpdateForReductionClause(CGF, S, [IL, &S](CodeGenFunction &CGF) {
4205 return CGF.Builder.CreateIsNotNull(
4206 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
4209 // Emit final copy of the lastprivate variables if IsLastIter != 0.
4210 if (HasLastprivates)
4211 CGF.EmitOMPLastprivateClauseFinal(
4212 S, /*NoFinals=*/false,
4213 CGF.Builder.CreateIsNotNull(
4214 CGF.EmitLoadOfScalar(IL, S.getBeginLoc())));
4217 bool HasCancel = false;
4218 if (auto *OSD = dyn_cast<OMPSectionsDirective>(&S))
4219 HasCancel = OSD->hasCancel();
4220 else if (auto *OPSD = dyn_cast<OMPParallelSectionsDirective>(&S))
4221 HasCancel = OPSD->hasCancel();
4222 OMPCancelStackRAII CancelRegion(*this, EKind, HasCancel);
4223 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_sections, CodeGen,
4224 HasCancel);
4225 // Emit a barrier for the lastprivates only if the 'sections' directive has
4226 // a 'nowait' clause. Otherwise the barrier will be generated by the codegen
4227 // for the directive.
4228 if (HasLastprivates && S.getSingleClause<OMPNowaitClause>()) {
4229 // Emit implicit barrier to synchronize threads and avoid data races on
4230 // initialization of firstprivate variables.
4231 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
4232 OMPD_unknown);
4236 void CodeGenFunction::EmitOMPScopeDirective(const OMPScopeDirective &S) {
4238 // Emit code for the 'scope' region.
4239 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4240 Action.Enter(CGF);
4241 OMPPrivateScope PrivateScope(CGF);
4242 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
4243 CGF.EmitOMPPrivateClause(S, PrivateScope);
4244 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
4245 (void)PrivateScope.Privatize();
4246 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
4247 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
4249 auto LPCRegion =
4250 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4251 OMPLexicalScope Scope(*this, S, OMPD_unknown);
4252 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_scope, CodeGen);
4254 // Emit an implicit barrier at the end.
4255 if (!S.getSingleClause<OMPNowaitClause>()) {
4256 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_scope);
4258 // Check for outer lastprivate conditional update.
4259 checkForLastprivateConditionalUpdate(*this, S);
4262 void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
4263 if (CGM.getLangOpts().OpenMPIRBuilder) {
4264 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
4265 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
4266 using BodyGenCallbackTy = llvm::OpenMPIRBuilder::StorableBodyGenCallbackTy;
4268 auto FiniCB = [this](InsertPointTy IP) {
4269 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
4270 return llvm::Error::success();
4273 const CapturedStmt *ICS = S.getInnermostCapturedStmt();
4274 const Stmt *CapturedStmt = S.getInnermostCapturedStmt()->getCapturedStmt();
4275 const auto *CS = dyn_cast<CompoundStmt>(CapturedStmt);
4276 llvm::SmallVector<BodyGenCallbackTy, 4> SectionCBVector;
4277     if (CS) {
4278       for (const Stmt *SubStmt : CS->children()) {
4279         auto SectionCB = [this, SubStmt](InsertPointTy AllocaIP,
4280                                          InsertPointTy CodeGenIP) {
4281           OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
4282               *this, SubStmt, AllocaIP, CodeGenIP, "section");
4283           return llvm::Error::success();
4284         };
4285         SectionCBVector.push_back(SectionCB);
4286       }
4287     } else {
4288       auto SectionCB = [this, CapturedStmt](InsertPointTy AllocaIP,
4289                                             InsertPointTy CodeGenIP) {
4290         OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
4291             *this, CapturedStmt, AllocaIP, CodeGenIP, "section");
4292         return llvm::Error::success();
4293       };
4294       SectionCBVector.push_back(SectionCB);
4295     }
4297 // Privatization callback that performs appropriate action for
4298 // shared/private/firstprivate/lastprivate/copyin/... variables.
4300 // TODO: This defaults to shared right now.
4301 auto PrivCB = [](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
4302 llvm::Value &, llvm::Value &Val, llvm::Value *&ReplVal) {
4303 // The next line is appropriate only for variables (Val) with the
4304 // data-sharing attribute "shared".
4305 ReplVal = &Val;
4307       return CodeGenIP;
4308     };
4310 CGCapturedStmtInfo CGSI(*ICS, CR_OpenMP);
4311 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(*this, &CGSI);
4312 llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
4313 AllocaInsertPt->getParent(), AllocaInsertPt->getIterator());
4314 llvm::OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
4315 OMPBuilder.createSections(Builder, AllocaIP, SectionCBVector, PrivCB,
4316 FiniCB, S.hasCancel(),
4317 S.getSingleClause<OMPNowaitClause>());
4318 assert(AfterIP && "unexpected error creating sections");
4319 Builder.restoreIP(*AfterIP);
4320     return;
4321   }
4323 auto LPCRegion =
4324 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4325 OMPLexicalScope Scope(*this, S, OMPD_unknown);
4326 EmitSections(S);
4328 // Emit an implicit barrier at the end.
4329 if (!S.getSingleClause<OMPNowaitClause>()) {
4330 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(),
4331 OMPD_sections);
4333 // Check for outer lastprivate conditional update.
4334 checkForLastprivateConditionalUpdate(*this, S);
4337 void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
4338 if (CGM.getLangOpts().OpenMPIRBuilder) {
4339 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
4340 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
4342 const Stmt *SectionRegionBodyStmt = S.getAssociatedStmt();
4343 auto FiniCB = [this](InsertPointTy IP) {
4344 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
4345 return llvm::Error::success();
4348 auto BodyGenCB = [SectionRegionBodyStmt, this](InsertPointTy AllocaIP,
4349 InsertPointTy CodeGenIP) {
4350 OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
4351 *this, SectionRegionBodyStmt, AllocaIP, CodeGenIP, "section");
4352 return llvm::Error::success();
4355 LexicalScope Scope(*this, S.getSourceRange());
4356 EmitStopPoint(&S);
4357 llvm::OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
4358 OMPBuilder.createSection(Builder, BodyGenCB, FiniCB);
4359 assert(AfterIP && "unexpected error creating section");
4360 Builder.restoreIP(*AfterIP);
4362     return;
4363   }
4364 LexicalScope Scope(*this, S.getSourceRange());
4365 EmitStopPoint(&S);
4366 EmitStmt(S.getAssociatedStmt());
4369 void CodeGenFunction::EmitOMPSingleDirective(const OMPSingleDirective &S) {
4370 llvm::SmallVector<const Expr *, 8> CopyprivateVars;
4371 llvm::SmallVector<const Expr *, 8> DestExprs;
4372 llvm::SmallVector<const Expr *, 8> SrcExprs;
4373 llvm::SmallVector<const Expr *, 8> AssignmentOps;
4374 // Check if there are any 'copyprivate' clauses associated with this
4375 // 'single' construct.
4376 // Build a list of copyprivate variables along with helper expressions
4377 // (<source>, <destination>, <destination>=<source> expressions)
4378 for (const auto *C : S.getClausesOfKind<OMPCopyprivateClause>()) {
4379 CopyprivateVars.append(C->varlist_begin(), C->varlist_end());
4380 DestExprs.append(C->destination_exprs().begin(),
4381 C->destination_exprs().end());
4382 SrcExprs.append(C->source_exprs().begin(), C->source_exprs().end());
4383 AssignmentOps.append(C->assignment_ops().begin(),
4384 C->assignment_ops().end());
4386 // Emit code for 'single' region along with 'copyprivate' clauses
4387 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4388 Action.Enter(CGF);
4389 OMPPrivateScope SingleScope(CGF);
4390 (void)CGF.EmitOMPFirstprivateClause(S, SingleScope);
4391 CGF.EmitOMPPrivateClause(S, SingleScope);
4392 (void)SingleScope.Privatize();
4393 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
4396 auto LPCRegion =
4397 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4398 OMPLexicalScope Scope(*this, S, OMPD_unknown);
4399 CGM.getOpenMPRuntime().emitSingleRegion(*this, CodeGen, S.getBeginLoc(),
4400 CopyprivateVars, DestExprs,
4401 SrcExprs, AssignmentOps);
4403   // Emit an implicit barrier at the end (to avoid a data race on firstprivate
4404   // init), unless 'nowait' was specified or a 'copyprivate' clause is present.
4405   if (!S.getSingleClause<OMPNowaitClause>() && CopyprivateVars.empty()) {
4406     CGM.getOpenMPRuntime().emitBarrierCall(
4407         *this, S.getBeginLoc(),
4408         S.getSingleClause<OMPNowaitClause>() ? OMPD_unknown : OMPD_single);
4409   }
4410   // Check for outer lastprivate conditional update.
4411   checkForLastprivateConditionalUpdate(*this, S);
4412 }
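// Example (hypothetical user code) of the copyprivate broadcast prepared
// above:
//   #pragma omp single copyprivate(x)
//     x = read_config();
// One thread executes the region; the collected <source>/<destination> pairs
// and <destination> = <source> helper expressions let the runtime broadcast
// that thread's 'x' to all other threads (via __kmpc_copyprivate).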
4414 static void emitMaster(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
4415 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4416 Action.Enter(CGF);
4417 CGF.EmitStmt(S.getRawStmt());
4419 CGF.CGM.getOpenMPRuntime().emitMasterRegion(CGF, CodeGen, S.getBeginLoc());
4422 void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
4423 if (CGM.getLangOpts().OpenMPIRBuilder) {
4424 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
4425 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
4427 const Stmt *MasterRegionBodyStmt = S.getAssociatedStmt();
4429 auto FiniCB = [this](InsertPointTy IP) {
4430 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
4431 return llvm::Error::success();
4434 auto BodyGenCB = [MasterRegionBodyStmt, this](InsertPointTy AllocaIP,
4435 InsertPointTy CodeGenIP) {
4436 OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
4437 *this, MasterRegionBodyStmt, AllocaIP, CodeGenIP, "master");
4438 return llvm::Error::success();
4441 LexicalScope Scope(*this, S.getSourceRange());
4442 EmitStopPoint(&S);
4443 llvm::OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
4444 OMPBuilder.createMaster(Builder, BodyGenCB, FiniCB);
4445 assert(AfterIP && "unexpected error creating master");
4446 Builder.restoreIP(*AfterIP);
4448     return;
4449   }
4450 LexicalScope Scope(*this, S.getSourceRange());
4451 EmitStopPoint(&S);
4452   emitMaster(*this, S);
4453 }
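// A sketch of the guard emitMaster produces (assuming the usual libomp entry
// points):
//   if (__kmpc_master(&loc, tid)) { // nonzero only in the primary thread
//     <master region body>
//     __kmpc_end_master(&loc, tid);
//   }
// Note that 'master' has no implicit barrier, so none is emitted here.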
4455 static void emitMasked(CodeGenFunction &CGF, const OMPExecutableDirective &S) {
4456 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4457 Action.Enter(CGF);
4458 CGF.EmitStmt(S.getRawStmt());
4460 Expr *Filter = nullptr;
4461 if (const auto *FilterClause = S.getSingleClause<OMPFilterClause>())
4462 Filter = FilterClause->getThreadID();
4463 CGF.CGM.getOpenMPRuntime().emitMaskedRegion(CGF, CodeGen, S.getBeginLoc(),
4464 Filter);
4467 void CodeGenFunction::EmitOMPMaskedDirective(const OMPMaskedDirective &S) {
4468 if (CGM.getLangOpts().OpenMPIRBuilder) {
4469 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
4470 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
4472 const Stmt *MaskedRegionBodyStmt = S.getAssociatedStmt();
4473 const Expr *Filter = nullptr;
4474 if (const auto *FilterClause = S.getSingleClause<OMPFilterClause>())
4475 Filter = FilterClause->getThreadID();
4476 llvm::Value *FilterVal = Filter
4477 ? EmitScalarExpr(Filter, CGM.Int32Ty)
4478 : llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/0);
4480 auto FiniCB = [this](InsertPointTy IP) {
4481 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
4482 return llvm::Error::success();
4485 auto BodyGenCB = [MaskedRegionBodyStmt, this](InsertPointTy AllocaIP,
4486 InsertPointTy CodeGenIP) {
4487 OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
4488 *this, MaskedRegionBodyStmt, AllocaIP, CodeGenIP, "masked");
4489 return llvm::Error::success();
4492 LexicalScope Scope(*this, S.getSourceRange());
4493 EmitStopPoint(&S);
4494 llvm::OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
4495 OMPBuilder.createMasked(Builder, BodyGenCB, FiniCB, FilterVal);
4496 assert(AfterIP && "unexpected error creating masked");
4497 Builder.restoreIP(*AfterIP);
4499     return;
4500   }
4501 LexicalScope Scope(*this, S.getSourceRange());
4502 EmitStopPoint(&S);
4503   emitMasked(*this, S);
4504 }
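// Example (hypothetical filter value): for
//   #pragma omp masked filter(2)
//   { work(); }
// only the thread whose id is 2 executes the region; without a 'filter'
// clause the filter defaults to thread 0 (the primary), matching the
// ConstantInt fallback in the IRBuilder path above.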
4506 void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
4507 if (CGM.getLangOpts().OpenMPIRBuilder) {
4508 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
4509 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
4511 const Stmt *CriticalRegionBodyStmt = S.getAssociatedStmt();
4512 const Expr *Hint = nullptr;
4513 if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
4514 Hint = HintClause->getHint();
4516 // TODO: This is slightly different from what's currently being done in
4517 // clang. Fix the Int32Ty to IntPtrTy (pointer width size) when everything
4518 // about typing is final.
4519 llvm::Value *HintInst = nullptr;
4520 if (Hint)
4521 HintInst =
4522 Builder.CreateIntCast(EmitScalarExpr(Hint), CGM.Int32Ty, false);
4524 auto FiniCB = [this](InsertPointTy IP) {
4525 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
4526 return llvm::Error::success();
4529 auto BodyGenCB = [CriticalRegionBodyStmt, this](InsertPointTy AllocaIP,
4530 InsertPointTy CodeGenIP) {
4531 OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
4532 *this, CriticalRegionBodyStmt, AllocaIP, CodeGenIP, "critical");
4533 return llvm::Error::success();
4536 LexicalScope Scope(*this, S.getSourceRange());
4537 EmitStopPoint(&S);
4538 llvm::OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
4539 OMPBuilder.createCritical(Builder, BodyGenCB, FiniCB,
4540 S.getDirectiveName().getAsString(), HintInst);
4541 assert(AfterIP && "unexpected error creating critical");
4542 Builder.restoreIP(*AfterIP);
4544     return;
4545   }
4547 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4548 Action.Enter(CGF);
4549 CGF.EmitStmt(S.getAssociatedStmt());
4551 const Expr *Hint = nullptr;
4552 if (const auto *HintClause = S.getSingleClause<OMPHintClause>())
4553 Hint = HintClause->getHint();
4554 LexicalScope Scope(*this, S.getSourceRange());
4555 EmitStopPoint(&S);
4556 CGM.getOpenMPRuntime().emitCriticalRegion(*this,
4557 S.getDirectiveName().getAsString(),
4558                                             CodeGen, S.getBeginLoc(), Hint);
4559 }
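// A sketch of the lowering for a named critical with a hint (assuming the
// usual libomp entry points):
//   #pragma omp critical (lk) hint(omp_sync_hint_contended)
//   { update(); }
// acquires the lock named "lk" via __kmpc_critical_with_hint (plain
// __kmpc_critical when no hint is given) and releases it on every exit with
// __kmpc_end_critical.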
4561 void CodeGenFunction::EmitOMPParallelForDirective(
4562 const OMPParallelForDirective &S) {
4563 // Emit directive as a combined directive that consists of two implicit
4564 // directives: 'parallel' with 'for' directive.
4565 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4566 Action.Enter(CGF);
4567 emitOMPCopyinClause(CGF, S);
4568 (void)emitWorksharingDirective(CGF, S, S.hasCancel());
4571 const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
4572 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
4573 CGCapturedStmtInfo CGSI(CR_OpenMP);
4574 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
4575 OMPLoopScope LoopScope(CGF, S);
4576     return CGF.EmitScalarExpr(S.getNumIterations());
4577   };
4578 bool IsInscan = llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
4579 [](const OMPReductionClause *C) {
4580                                  return C->getModifier() == OMPC_REDUCTION_inscan;
4581                                });
4582 if (IsInscan)
4583 emitScanBasedDirectiveDecls(*this, S, NumIteratorsGen);
4584 auto LPCRegion =
4585 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4586 emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
4587 emitEmptyBoundParameters);
4588 if (IsInscan)
4589 emitScanBasedDirectiveFinals(*this, S, NumIteratorsGen);
4591 // Check for outer lastprivate conditional update.
4592   checkForLastprivateConditionalUpdate(*this, S);
4593 }
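// Example (hypothetical user code) of the inscan special case handled above:
//   #pragma omp parallel for reduction(inscan, +: x)
// The scan buffers must be sized before the 'parallel' region is entered,
// which is why NumIteratorsGen evaluates S.getNumIterations() up front; the
// body is then emitted as a 'parallel' region wrapping a 'for' worksharing
// loop.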
4595 void CodeGenFunction::EmitOMPParallelForSimdDirective(
4596 const OMPParallelForSimdDirective &S) {
4597 // Emit directive as a combined directive that consists of two implicit
4598 // directives: 'parallel' with 'for' directive.
4599 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4600 Action.Enter(CGF);
4601 emitOMPCopyinClause(CGF, S);
4602 (void)emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
4605 const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
4606 CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
4607 CGCapturedStmtInfo CGSI(CR_OpenMP);
4608 CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
4609 OMPLoopScope LoopScope(CGF, S);
4610     return CGF.EmitScalarExpr(S.getNumIterations());
4611   };
4612 bool IsInscan = llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
4613 [](const OMPReductionClause *C) {
4614                                  return C->getModifier() == OMPC_REDUCTION_inscan;
4615                                });
4616 if (IsInscan)
4617 emitScanBasedDirectiveDecls(*this, S, NumIteratorsGen);
4618 auto LPCRegion =
4619 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4620 emitCommonOMPParallelDirective(*this, S, OMPD_for_simd, CodeGen,
4621 emitEmptyBoundParameters);
4622 if (IsInscan)
4623 emitScanBasedDirectiveFinals(*this, S, NumIteratorsGen);
4625 // Check for outer lastprivate conditional update.
4626 checkForLastprivateConditionalUpdate(*this, S);
4629 void CodeGenFunction::EmitOMPParallelMasterDirective(
4630 const OMPParallelMasterDirective &S) {
4631 // Emit directive as a combined directive that consists of two implicit
4632 // directives: 'parallel' with 'master' directive.
4633 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4634 Action.Enter(CGF);
4635 OMPPrivateScope PrivateScope(CGF);
4636 emitOMPCopyinClause(CGF, S);
4637 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
4638 CGF.EmitOMPPrivateClause(S, PrivateScope);
4639 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
4640 (void)PrivateScope.Privatize();
4641 emitMaster(CGF, S);
4642 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
4645 auto LPCRegion =
4646 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4647 emitCommonOMPParallelDirective(*this, S, OMPD_master, CodeGen,
4648 emitEmptyBoundParameters);
4649 emitPostUpdateForReductionClause(*this, S,
4650 [](CodeGenFunction &) { return nullptr; });
4652 // Check for outer lastprivate conditional update.
4653 checkForLastprivateConditionalUpdate(*this, S);
4656 void CodeGenFunction::EmitOMPParallelMaskedDirective(
4657 const OMPParallelMaskedDirective &S) {
4658 // Emit directive as a combined directive that consists of two implicit
4659 // directives: 'parallel' with 'masked' directive.
4660 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4661 Action.Enter(CGF);
4662 OMPPrivateScope PrivateScope(CGF);
4663 emitOMPCopyinClause(CGF, S);
4664 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
4665 CGF.EmitOMPPrivateClause(S, PrivateScope);
4666 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
4667 (void)PrivateScope.Privatize();
4668 emitMasked(CGF, S);
4669 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
4672 auto LPCRegion =
4673 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4674 emitCommonOMPParallelDirective(*this, S, OMPD_masked, CodeGen,
4675 emitEmptyBoundParameters);
4676 emitPostUpdateForReductionClause(*this, S,
4677 [](CodeGenFunction &) { return nullptr; });
4679 // Check for outer lastprivate conditional update.
4680 checkForLastprivateConditionalUpdate(*this, S);
4683 void CodeGenFunction::EmitOMPParallelSectionsDirective(
4684 const OMPParallelSectionsDirective &S) {
4685 // Emit directive as a combined directive that consists of two implicit
4686 // directives: 'parallel' with 'sections' directive.
4687 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
4688 Action.Enter(CGF);
4689 emitOMPCopyinClause(CGF, S);
4690 CGF.EmitSections(S);
4693 auto LPCRegion =
4694 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
4695 emitCommonOMPParallelDirective(*this, S, OMPD_sections, CodeGen,
4696 emitEmptyBoundParameters);
4698 // Check for outer lastprivate conditional update.
4699 checkForLastprivateConditionalUpdate(*this, S);
4702 namespace {
4703 /// Get the list of variables declared in the context of the untied tasks.
4704 class CheckVarsEscapingUntiedTaskDeclContext final
4705 : public ConstStmtVisitor<CheckVarsEscapingUntiedTaskDeclContext> {
4706 llvm::SmallVector<const VarDecl *, 4> PrivateDecls;
4708 public:
4709 explicit CheckVarsEscapingUntiedTaskDeclContext() = default;
4710 virtual ~CheckVarsEscapingUntiedTaskDeclContext() = default;
4711 void VisitDeclStmt(const DeclStmt *S) {
4712 if (!S)
4713 return;
4714     // Only local vars need to be privatized; static locals can be processed as is.
4715 for (const Decl *D : S->decls()) {
4716 if (const auto *VD = dyn_cast_or_null<VarDecl>(D))
4717 if (VD->hasLocalStorage())
4718 PrivateDecls.push_back(VD);
4721 void VisitOMPExecutableDirective(const OMPExecutableDirective *) {}
4722 void VisitCapturedStmt(const CapturedStmt *) {}
4723 void VisitLambdaExpr(const LambdaExpr *) {}
4724 void VisitBlockExpr(const BlockExpr *) {}
4725 void VisitStmt(const Stmt *S) {
4726 if (!S)
4727 return;
4728 for (const Stmt *Child : S->children())
4729 if (Child)
4730 Visit(Child);
4733   /// Returns the list of local variables to be privatized for the untied task.
4734   ArrayRef<const VarDecl *> getPrivateDecls() const { return PrivateDecls; }
4735 };
4736 } // anonymous namespace
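// Example (hypothetical user code) of why this visitor is needed:
//   #pragma omp task untied
//   { int tmp = f(); g(); /* possible task switch */ use(tmp); }
// An untied task may resume in a different invocation of the outlined
// function, so locals such as 'tmp' are collected here and given
// task-private storage rather than a slot on the outlined function's stack.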
4738 static void buildDependences(const OMPExecutableDirective &S,
4739 OMPTaskDataTy &Data) {
4741 // First look for 'omp_all_memory' and add this first.
4742 bool OmpAllMemory = false;
4743 if (llvm::any_of(
4744 S.getClausesOfKind<OMPDependClause>(), [](const OMPDependClause *C) {
4745 return C->getDependencyKind() == OMPC_DEPEND_outallmemory ||
4746 C->getDependencyKind() == OMPC_DEPEND_inoutallmemory;
4747 })) {
4748 OmpAllMemory = true;
4749 // Since both OMPC_DEPEND_outallmemory and OMPC_DEPEND_inoutallmemory are
4750 // equivalent to the runtime, always use OMPC_DEPEND_outallmemory to
4751 // simplify.
4752 OMPTaskDataTy::DependData &DD =
4753 Data.Dependences.emplace_back(OMPC_DEPEND_outallmemory,
4754 /*IteratorExpr=*/nullptr);
4755 // Add a nullptr Expr to simplify the codegen in emitDependData.
4756 DD.DepExprs.push_back(nullptr);
4758   // Add the remaining dependences, skipping any 'out' or 'inout' entries that
4759   // are overridden by 'omp_all_memory'.
4760 for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
4761 OpenMPDependClauseKind Kind = C->getDependencyKind();
4762 if (Kind == OMPC_DEPEND_outallmemory || Kind == OMPC_DEPEND_inoutallmemory)
4763 continue;
4764 if (OmpAllMemory && (Kind == OMPC_DEPEND_out || Kind == OMPC_DEPEND_inout))
4765 continue;
4766 OMPTaskDataTy::DependData &DD =
4767 Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
4768     DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
4769   }
4770 }
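// Example (hypothetical clauses): for
//   #pragma omp task depend(inout: omp_all_memory) depend(out: a) \
//                    depend(in: b)
// the list built above contains a single 'outallmemory' entry (which
// subsumes 'out: a') followed only by the 'in: b' dependence.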
4772 void CodeGenFunction::EmitOMPTaskBasedDirective(
4773 const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion,
4774 const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen,
4775 OMPTaskDataTy &Data) {
4776 // Emit outlined function for task construct.
4777 const CapturedStmt *CS = S.getCapturedStmt(CapturedRegion);
4778 auto I = CS->getCapturedDecl()->param_begin();
4779 auto PartId = std::next(I);
4780 auto TaskT = std::next(I, 4);
4781 // Check if the task is final
4782 if (const auto *Clause = S.getSingleClause<OMPFinalClause>()) {
4783 // If the condition constant folds and can be elided, try to avoid emitting
4784 // the condition and the dead arm of the if/else.
4785 const Expr *Cond = Clause->getCondition();
4786 bool CondConstant;
4787 if (ConstantFoldsToSimpleInteger(Cond, CondConstant))
4788 Data.Final.setInt(CondConstant);
4789 else
4790 Data.Final.setPointer(EvaluateExprAsBool(Cond));
4791 } else {
4792 // By default the task is not final.
4793     Data.Final.setInt(/*IntVal=*/false);
4794   }
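// Example: with 'final(sizeof(void *) > 4)' the condition constant-folds and
// only the integer flag of Data.Final is set above, while 'final(n > cutoff)'
// is evaluated at run time and carried in the pointer half instead.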
4795 // Check if the task has 'priority' clause.
4796 if (const auto *Clause = S.getSingleClause<OMPPriorityClause>()) {
4797 const Expr *Prio = Clause->getPriority();
4798 Data.Priority.setInt(/*IntVal=*/true);
4799 Data.Priority.setPointer(EmitScalarConversion(
4800 EmitScalarExpr(Prio), Prio->getType(),
4801 getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1),
4802 Prio->getExprLoc()));
4804   // The first function argument for tasks is a thread id; the second one is a
4805   // part id (0 for tied tasks, >=0 for untied tasks).
4806 llvm::DenseSet<const VarDecl *> EmittedAsPrivate;
4807 // Get list of private variables.
4808 for (const auto *C : S.getClausesOfKind<OMPPrivateClause>()) {
4809 auto IRef = C->varlist_begin();
4810 for (const Expr *IInit : C->private_copies()) {
4811 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
4812 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
4813 Data.PrivateVars.push_back(*IRef);
4814 Data.PrivateCopies.push_back(IInit);
4816 ++IRef;
4819 EmittedAsPrivate.clear();
4820 // Get list of firstprivate variables.
4821 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
4822 auto IRef = C->varlist_begin();
4823 auto IElemInitRef = C->inits().begin();
4824 for (const Expr *IInit : C->private_copies()) {
4825 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
4826 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
4827 Data.FirstprivateVars.push_back(*IRef);
4828 Data.FirstprivateCopies.push_back(IInit);
4829 Data.FirstprivateInits.push_back(*IElemInitRef);
4831 ++IRef;
4832 ++IElemInitRef;
4835 // Get list of lastprivate variables (for taskloops).
4836 llvm::MapVector<const VarDecl *, const DeclRefExpr *> LastprivateDstsOrigs;
4837 for (const auto *C : S.getClausesOfKind<OMPLastprivateClause>()) {
4838 auto IRef = C->varlist_begin();
4839 auto ID = C->destination_exprs().begin();
4840 for (const Expr *IInit : C->private_copies()) {
4841 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
4842 if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
4843 Data.LastprivateVars.push_back(*IRef);
4844 Data.LastprivateCopies.push_back(IInit);
4846 LastprivateDstsOrigs.insert(
4847 std::make_pair(cast<VarDecl>(cast<DeclRefExpr>(*ID)->getDecl()),
4848 cast<DeclRefExpr>(*IRef)));
4849 ++IRef;
4850 ++ID;
4853 SmallVector<const Expr *, 4> LHSs;
4854 SmallVector<const Expr *, 4> RHSs;
4855 for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
4856 Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
4857 Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
4858 Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
4859 Data.ReductionOps.append(C->reduction_ops().begin(),
4860 C->reduction_ops().end());
4861 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
4862 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
4864 Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit(
4865 *this, S.getBeginLoc(), LHSs, RHSs, Data);
4866 // Build list of dependences.
4867 buildDependences(S, Data);
4868 // Get list of local vars for untied tasks.
4869 if (!Data.Tied) {
4870 CheckVarsEscapingUntiedTaskDeclContext Checker;
4871 Checker.Visit(S.getInnermostCapturedStmt()->getCapturedStmt());
4872 Data.PrivateLocals.append(Checker.getPrivateDecls().begin(),
4873 Checker.getPrivateDecls().end());
4875 auto &&CodeGen = [&Data, &S, CS, &BodyGen, &LastprivateDstsOrigs,
4876 CapturedRegion](CodeGenFunction &CGF,
4877 PrePostActionTy &Action) {
4878 llvm::MapVector<CanonicalDeclPtr<const VarDecl>,
4879 std::pair<Address, Address>>
4880 UntiedLocalVars;
4881 // Set proper addresses for generated private copies.
4882 OMPPrivateScope Scope(CGF);
4883 // Generate debug info for variables present in shared clause.
4884 if (auto *DI = CGF.getDebugInfo()) {
4885 llvm::SmallDenseMap<const VarDecl *, FieldDecl *> CaptureFields =
4886 CGF.CapturedStmtInfo->getCaptureFields();
4887 llvm::Value *ContextValue = CGF.CapturedStmtInfo->getContextValue();
4888       if (!CaptureFields.empty() && ContextValue) {
4889 unsigned CharWidth = CGF.getContext().getCharWidth();
4890         // The shared variables are packed together as members of a structure, so
4891         // the address of each shared variable can be computed by adding its offset
4892         // within the record to the base address of the record. For each shared
4893         // variable, the debug intrinsic llvm.dbg.declare is generated with an
4894         // appropriate expression (DIExpression).
4895 // Ex:
4896 // %12 = load %struct.anon*, %struct.anon** %__context.addr.i
4897 // call void @llvm.dbg.declare(metadata %struct.anon* %12,
4898 // metadata !svar1,
4899 // metadata !DIExpression(DW_OP_deref))
4900 // call void @llvm.dbg.declare(metadata %struct.anon* %12,
4901 // metadata !svar2,
4902 // metadata !DIExpression(DW_OP_plus_uconst, 8, DW_OP_deref))
4903 for (auto It = CaptureFields.begin(); It != CaptureFields.end(); ++It) {
4904 const VarDecl *SharedVar = It->first;
4905 RecordDecl *CaptureRecord = It->second->getParent();
4906 const ASTRecordLayout &Layout =
4907 CGF.getContext().getASTRecordLayout(CaptureRecord);
4908 unsigned Offset =
4909 Layout.getFieldOffset(It->second->getFieldIndex()) / CharWidth;
4910 if (CGF.CGM.getCodeGenOpts().hasReducedDebugInfo())
4911 (void)DI->EmitDeclareOfAutoVariable(SharedVar, ContextValue,
4912 CGF.Builder, false);
4913 // Get the call dbg.declare instruction we just created and update
4914 // its DIExpression to add offset to base address.
4915 auto UpdateExpr = [](llvm::LLVMContext &Ctx, auto *Declare,
4916 unsigned Offset) {
4917 SmallVector<uint64_t, 8> Ops;
4918 // Add offset to the base address if non zero.
4919 if (Offset) {
4920 Ops.push_back(llvm::dwarf::DW_OP_plus_uconst);
4921 Ops.push_back(Offset);
4923 Ops.push_back(llvm::dwarf::DW_OP_deref);
4924 Declare->setExpression(llvm::DIExpression::get(Ctx, Ops));
4926 llvm::Instruction &Last = CGF.Builder.GetInsertBlock()->back();
4927 if (auto DDI = dyn_cast<llvm::DbgVariableIntrinsic>(&Last))
4928 UpdateExpr(DDI->getContext(), DDI, Offset);
4929 // If we're emitting using the new debug info format into a block
4930 // without a terminator, the record will be "trailing".
4931 assert(!Last.isTerminator() && "unexpected terminator");
4932 if (auto *Marker =
4933 CGF.Builder.GetInsertBlock()->getTrailingDbgRecords()) {
4934 for (llvm::DbgVariableRecord &DVR : llvm::reverse(
4935 llvm::filterDbgVars(Marker->getDbgRecordRange()))) {
4936 UpdateExpr(Last.getContext(), &DVR, Offset);
4937 break;
4943 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> FirstprivatePtrs;
4944 if (!Data.PrivateVars.empty() || !Data.FirstprivateVars.empty() ||
4945 !Data.LastprivateVars.empty() || !Data.PrivateLocals.empty()) {
4946 enum { PrivatesParam = 2, CopyFnParam = 3 };
4947 llvm::Value *CopyFn = CGF.Builder.CreateLoad(
4948 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
4949 llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
4950 CS->getCapturedDecl()->getParam(PrivatesParam)));
4951 // Map privates.
4952 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
4953 llvm::SmallVector<llvm::Value *, 16> CallArgs;
4954 llvm::SmallVector<llvm::Type *, 4> ParamTypes;
4955 CallArgs.push_back(PrivatesPtr);
4956 ParamTypes.push_back(PrivatesPtr->getType());
4957 for (const Expr *E : Data.PrivateVars) {
4958 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4959 RawAddress PrivatePtr = CGF.CreateMemTemp(
4960 CGF.getContext().getPointerType(E->getType()), ".priv.ptr.addr");
4961 PrivatePtrs.emplace_back(VD, PrivatePtr);
4962 CallArgs.push_back(PrivatePtr.getPointer());
4963 ParamTypes.push_back(PrivatePtr.getType());
4965 for (const Expr *E : Data.FirstprivateVars) {
4966 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4967 RawAddress PrivatePtr =
4968 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
4969 ".firstpriv.ptr.addr");
4970 PrivatePtrs.emplace_back(VD, PrivatePtr);
4971 FirstprivatePtrs.emplace_back(VD, PrivatePtr);
4972 CallArgs.push_back(PrivatePtr.getPointer());
4973 ParamTypes.push_back(PrivatePtr.getType());
4975 for (const Expr *E : Data.LastprivateVars) {
4976 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
4977 RawAddress PrivatePtr =
4978 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
4979 ".lastpriv.ptr.addr");
4980 PrivatePtrs.emplace_back(VD, PrivatePtr);
4981 CallArgs.push_back(PrivatePtr.getPointer());
4982 ParamTypes.push_back(PrivatePtr.getType());
4984 for (const VarDecl *VD : Data.PrivateLocals) {
4985 QualType Ty = VD->getType().getNonReferenceType();
4986 if (VD->getType()->isLValueReferenceType())
4987 Ty = CGF.getContext().getPointerType(Ty);
4988 if (isAllocatableDecl(VD))
4989 Ty = CGF.getContext().getPointerType(Ty);
4990 RawAddress PrivatePtr = CGF.CreateMemTemp(
4991 CGF.getContext().getPointerType(Ty), ".local.ptr.addr");
4992 auto Result = UntiedLocalVars.insert(
4993 std::make_pair(VD, std::make_pair(PrivatePtr, Address::invalid())));
4994         // If the key already exists, update the entry in place.
4995         if (!Result.second)
4996           *Result.first = std::make_pair(
4997               VD, std::make_pair(PrivatePtr, Address::invalid()));
4998 CallArgs.push_back(PrivatePtr.getPointer());
4999 ParamTypes.push_back(PrivatePtr.getType());
5001 auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(),
5002 ParamTypes, /*isVarArg=*/false);
5003 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
5004 CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
5005 for (const auto &Pair : LastprivateDstsOrigs) {
5006 const auto *OrigVD = cast<VarDecl>(Pair.second->getDecl());
5007 DeclRefExpr DRE(CGF.getContext(), const_cast<VarDecl *>(OrigVD),
5008 /*RefersToEnclosingVariableOrCapture=*/
5009 CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
5010 Pair.second->getType(), VK_LValue,
5011 Pair.second->getExprLoc());
5012 Scope.addPrivate(Pair.first, CGF.EmitLValue(&DRE).getAddress());
5014 for (const auto &Pair : PrivatePtrs) {
5015 Address Replacement = Address(
5016 CGF.Builder.CreateLoad(Pair.second),
5017 CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()),
5018 CGF.getContext().getDeclAlign(Pair.first));
5019 Scope.addPrivate(Pair.first, Replacement);
5020 if (auto *DI = CGF.getDebugInfo())
5021 if (CGF.CGM.getCodeGenOpts().hasReducedDebugInfo())
5022 (void)DI->EmitDeclareOfAutoVariable(
5023 Pair.first, Pair.second.getBasePointer(), CGF.Builder,
5024 /*UsePointerValue*/ true);
5026 // Adjust mapping for internal locals by mapping actual memory instead of
5027 // a pointer to this memory.
5028 for (auto &Pair : UntiedLocalVars) {
5029 QualType VDType = Pair.first->getType().getNonReferenceType();
5030 if (Pair.first->getType()->isLValueReferenceType())
5031 VDType = CGF.getContext().getPointerType(VDType);
5032 if (isAllocatableDecl(Pair.first)) {
5033 llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
5034 Address Replacement(
5035 Ptr,
5036 CGF.ConvertTypeForMem(CGF.getContext().getPointerType(VDType)),
5037 CGF.getPointerAlign());
5038 Pair.second.first = Replacement;
5039 Ptr = CGF.Builder.CreateLoad(Replacement);
5040 Replacement = Address(Ptr, CGF.ConvertTypeForMem(VDType),
5041 CGF.getContext().getDeclAlign(Pair.first));
5042 Pair.second.second = Replacement;
5043 } else {
5044 llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
5045 Address Replacement(Ptr, CGF.ConvertTypeForMem(VDType),
5046 CGF.getContext().getDeclAlign(Pair.first));
5047 Pair.second.first = Replacement;
5051 if (Data.Reductions) {
5052 OMPPrivateScope FirstprivateScope(CGF);
5053 for (const auto &Pair : FirstprivatePtrs) {
5054 Address Replacement(
5055 CGF.Builder.CreateLoad(Pair.second),
5056 CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()),
5057 CGF.getContext().getDeclAlign(Pair.first));
5058 FirstprivateScope.addPrivate(Pair.first, Replacement);
5060 (void)FirstprivateScope.Privatize();
5061 OMPLexicalScope LexScope(CGF, S, CapturedRegion);
5062 ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionVars,
5063 Data.ReductionCopies, Data.ReductionOps);
5064 llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
5065 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(9)));
5066 for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
5067 RedCG.emitSharedOrigLValue(CGF, Cnt);
5068 RedCG.emitAggregateType(CGF, Cnt);
5069         // FIXME: This must be removed once the runtime library is fixed.
5070 // Emit required threadprivate variables for
5071 // initializer/combiner/finalizer.
5072 CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
5073 RedCG, Cnt);
5074 Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
5075 CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
5076 Replacement = Address(
5077 CGF.EmitScalarConversion(Replacement.emitRawPointer(CGF),
5078 CGF.getContext().VoidPtrTy,
5079 CGF.getContext().getPointerType(
5080 Data.ReductionCopies[Cnt]->getType()),
5081 Data.ReductionCopies[Cnt]->getExprLoc()),
5082 CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()),
5083 Replacement.getAlignment());
5084 Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
5085 Scope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
5088 // Privatize all private variables except for in_reduction items.
5089 (void)Scope.Privatize();
5090 SmallVector<const Expr *, 4> InRedVars;
5091 SmallVector<const Expr *, 4> InRedPrivs;
5092 SmallVector<const Expr *, 4> InRedOps;
5093 SmallVector<const Expr *, 4> TaskgroupDescriptors;
5094 for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
5095 auto IPriv = C->privates().begin();
5096 auto IRed = C->reduction_ops().begin();
5097 auto ITD = C->taskgroup_descriptors().begin();
5098 for (const Expr *Ref : C->varlist()) {
5099 InRedVars.emplace_back(Ref);
5100 InRedPrivs.emplace_back(*IPriv);
5101 InRedOps.emplace_back(*IRed);
5102 TaskgroupDescriptors.emplace_back(*ITD);
5103 std::advance(IPriv, 1);
5104 std::advance(IRed, 1);
5105 std::advance(ITD, 1);
5108 // Privatize in_reduction items here, because taskgroup descriptors must be
5109 // privatized earlier.
5110 OMPPrivateScope InRedScope(CGF);
5111 if (!InRedVars.empty()) {
5112 ReductionCodeGen RedCG(InRedVars, InRedVars, InRedPrivs, InRedOps);
5113 for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
5114 RedCG.emitSharedOrigLValue(CGF, Cnt);
5115 RedCG.emitAggregateType(CGF, Cnt);
5116         // The taskgroup descriptor variable is always implicitly firstprivate
5117         // and has already been privatized while processing the firstprivates.
5118         // FIXME: This must be removed once the runtime library is fixed.
5119 // Emit required threadprivate variables for
5120 // initializer/combiner/finalizer.
5121 CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
5122 RedCG, Cnt);
5123 llvm::Value *ReductionsPtr;
5124 if (const Expr *TRExpr = TaskgroupDescriptors[Cnt]) {
5125 ReductionsPtr = CGF.EmitLoadOfScalar(CGF.EmitLValue(TRExpr),
5126 TRExpr->getExprLoc());
5127 } else {
5128 ReductionsPtr = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
5130 Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
5131 CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
5132 Replacement = Address(
5133 CGF.EmitScalarConversion(
5134 Replacement.emitRawPointer(CGF), CGF.getContext().VoidPtrTy,
5135 CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
5136 InRedPrivs[Cnt]->getExprLoc()),
5137 CGF.ConvertTypeForMem(InRedPrivs[Cnt]->getType()),
5138 Replacement.getAlignment());
5139 Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
5140 InRedScope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
5143 (void)InRedScope.Privatize();
5145 CGOpenMPRuntime::UntiedTaskLocalDeclsRAII LocalVarsScope(CGF,
5146 UntiedLocalVars);
5147 Action.Enter(CGF);
5148 BodyGen(CGF);
5150 OpenMPDirectiveKind EKind = getEffectiveDirectiveKind(S);
5151 llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
5152 S, *I, *PartId, *TaskT, EKind, CodeGen, Data.Tied, Data.NumberOfParts);
5153 OMPLexicalScope Scope(*this, S, std::nullopt,
5154 !isOpenMPParallelDirective(EKind) &&
5155 !isOpenMPSimdDirective(EKind));
5156 TaskGen(*this, OutlinedFn, Data);
5159 static ImplicitParamDecl *
5160 createImplicitFirstprivateForType(ASTContext &C, OMPTaskDataTy &Data,
5161 QualType Ty, CapturedDecl *CD,
5162 SourceLocation Loc) {
5163 auto *OrigVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
5164 ImplicitParamKind::Other);
5165 auto *OrigRef = DeclRefExpr::Create(
5166 C, NestedNameSpecifierLoc(), SourceLocation(), OrigVD,
5167 /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
5168 auto *PrivateVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, Ty,
5169 ImplicitParamKind::Other);
5170 auto *PrivateRef = DeclRefExpr::Create(
5171 C, NestedNameSpecifierLoc(), SourceLocation(), PrivateVD,
5172 /*RefersToEnclosingVariableOrCapture=*/false, Loc, Ty, VK_LValue);
5173 QualType ElemType = C.getBaseElementType(Ty);
5174 auto *InitVD = ImplicitParamDecl::Create(C, CD, Loc, /*Id=*/nullptr, ElemType,
5175 ImplicitParamKind::Other);
5176 auto *InitRef = DeclRefExpr::Create(
5177 C, NestedNameSpecifierLoc(), SourceLocation(), InitVD,
5178 /*RefersToEnclosingVariableOrCapture=*/false, Loc, ElemType, VK_LValue);
5179 PrivateVD->setInitStyle(VarDecl::CInit);
5180 PrivateVD->setInit(ImplicitCastExpr::Create(C, ElemType, CK_LValueToRValue,
5181 InitRef, /*BasePath=*/nullptr,
5182 VK_PRValue, FPOptionsOverride()));
5183 Data.FirstprivateVars.emplace_back(OrigRef);
5184 Data.FirstprivateCopies.emplace_back(PrivateRef);
5185 Data.FirstprivateInits.emplace_back(InitRef);
5186 return OrigVD;
5189 void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
5190 const OMPExecutableDirective &S, const RegionCodeGenTy &BodyGen,
5191 OMPTargetDataInfo &InputInfo) {
5192 // Emit outlined function for task construct.
5193 const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
5194 Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
5195 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
5196 auto I = CS->getCapturedDecl()->param_begin();
5197 auto PartId = std::next(I);
5198 auto TaskT = std::next(I, 4);
5199 OMPTaskDataTy Data;
5200 // The task is not final.
5201 Data.Final.setInt(/*IntVal=*/false);
5202 // Get list of firstprivate variables.
5203 for (const auto *C : S.getClausesOfKind<OMPFirstprivateClause>()) {
5204 auto IRef = C->varlist_begin();
5205 auto IElemInitRef = C->inits().begin();
5206 for (auto *IInit : C->private_copies()) {
5207 Data.FirstprivateVars.push_back(*IRef);
5208 Data.FirstprivateCopies.push_back(IInit);
5209 Data.FirstprivateInits.push_back(*IElemInitRef);
5210 ++IRef;
5211 ++IElemInitRef;
5214 SmallVector<const Expr *, 4> LHSs;
5215 SmallVector<const Expr *, 4> RHSs;
5216 for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
5217 Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
5218 Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
5219 Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
5220 Data.ReductionOps.append(C->reduction_ops().begin(),
5221 C->reduction_ops().end());
5222 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
5223 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
5225 OMPPrivateScope TargetScope(*this);
5226 VarDecl *BPVD = nullptr;
5227 VarDecl *PVD = nullptr;
5228 VarDecl *SVD = nullptr;
5229 VarDecl *MVD = nullptr;
5230 if (InputInfo.NumberOfTargetItems > 0) {
5231 auto *CD = CapturedDecl::Create(
5232 getContext(), getContext().getTranslationUnitDecl(), /*NumParams=*/0);
5233 llvm::APInt ArrSize(/*numBits=*/32, InputInfo.NumberOfTargetItems);
5234 QualType BaseAndPointerAndMapperType = getContext().getConstantArrayType(
5235 getContext().VoidPtrTy, ArrSize, nullptr, ArraySizeModifier::Normal,
5236 /*IndexTypeQuals=*/0);
5237 BPVD = createImplicitFirstprivateForType(
5238 getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
5239 PVD = createImplicitFirstprivateForType(
5240 getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
5241 QualType SizesType = getContext().getConstantArrayType(
5242 getContext().getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1),
5243 ArrSize, nullptr, ArraySizeModifier::Normal,
5244 /*IndexTypeQuals=*/0);
5245 SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD,
5246 S.getBeginLoc());
5247 TargetScope.addPrivate(BPVD, InputInfo.BasePointersArray);
5248 TargetScope.addPrivate(PVD, InputInfo.PointersArray);
5249 TargetScope.addPrivate(SVD, InputInfo.SizesArray);
5250 // If there is no user-defined mapper, the mapper array will be nullptr. In
5251 // this case, we don't need to privatize it.
5252 if (!isa_and_nonnull<llvm::ConstantPointerNull>(
5253 InputInfo.MappersArray.emitRawPointer(*this))) {
5254 MVD = createImplicitFirstprivateForType(
5255 getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
5256 TargetScope.addPrivate(MVD, InputInfo.MappersArray);
5259 (void)TargetScope.Privatize();
5260 buildDependences(S, Data);
5261 OpenMPDirectiveKind EKind = getEffectiveDirectiveKind(S);
5262 auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD, MVD, EKind,
5263 &InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) {
5264 // Set proper addresses for generated private copies.
5265 OMPPrivateScope Scope(CGF);
5266 if (!Data.FirstprivateVars.empty()) {
5267 enum { PrivatesParam = 2, CopyFnParam = 3 };
5268 llvm::Value *CopyFn = CGF.Builder.CreateLoad(
5269 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(CopyFnParam)));
5270 llvm::Value *PrivatesPtr = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(
5271 CS->getCapturedDecl()->getParam(PrivatesParam)));
5272 // Map privates.
5273 llvm::SmallVector<std::pair<const VarDecl *, Address>, 16> PrivatePtrs;
5274 llvm::SmallVector<llvm::Value *, 16> CallArgs;
5275 llvm::SmallVector<llvm::Type *, 4> ParamTypes;
5276 CallArgs.push_back(PrivatesPtr);
5277 ParamTypes.push_back(PrivatesPtr->getType());
5278 for (const Expr *E : Data.FirstprivateVars) {
5279 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
5280 RawAddress PrivatePtr =
5281 CGF.CreateMemTemp(CGF.getContext().getPointerType(E->getType()),
5282 ".firstpriv.ptr.addr");
5283 PrivatePtrs.emplace_back(VD, PrivatePtr);
5284 CallArgs.push_back(PrivatePtr.getPointer());
5285 ParamTypes.push_back(PrivatePtr.getType());
5287 auto *CopyFnTy = llvm::FunctionType::get(CGF.Builder.getVoidTy(),
5288 ParamTypes, /*isVarArg=*/false);
5289 CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
5290 CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
5291 for (const auto &Pair : PrivatePtrs) {
5292 Address Replacement(
5293 CGF.Builder.CreateLoad(Pair.second),
5294 CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()),
5295 CGF.getContext().getDeclAlign(Pair.first));
5296 Scope.addPrivate(Pair.first, Replacement);
5299 CGF.processInReduction(S, Data, CGF, CS, Scope);
5300 if (InputInfo.NumberOfTargetItems > 0) {
5301 InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP(
5302 CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0);
5303 InputInfo.PointersArray = CGF.Builder.CreateConstArrayGEP(
5304 CGF.GetAddrOfLocalVar(PVD), /*Index=*/0);
5305 InputInfo.SizesArray = CGF.Builder.CreateConstArrayGEP(
5306 CGF.GetAddrOfLocalVar(SVD), /*Index=*/0);
5307 // If MVD is nullptr, the mapper array is not privatized
5308 if (MVD)
5309 InputInfo.MappersArray = CGF.Builder.CreateConstArrayGEP(
5310 CGF.GetAddrOfLocalVar(MVD), /*Index=*/0);
5313 Action.Enter(CGF);
5314 OMPLexicalScope LexScope(CGF, S, OMPD_task, /*EmitPreInitStmt=*/false);
5315 auto *TL = S.getSingleClause<OMPThreadLimitClause>();
5316 if (CGF.CGM.getLangOpts().OpenMP >= 51 &&
5317 needsTaskBasedThreadLimit(EKind) && TL) {
5318 // Emit __kmpc_set_thread_limit() to set the thread_limit for the task
5319 // enclosing this target region. This will indirectly set the thread_limit
5320 // for every applicable construct within target region.
5321 CGF.CGM.getOpenMPRuntime().emitThreadLimitClause(
5322 CGF, TL->getThreadLimit().front(), S.getBeginLoc());
5324 BodyGen(CGF);
5326 llvm::Function *OutlinedFn = CGM.getOpenMPRuntime().emitTaskOutlinedFunction(
5327 S, *I, *PartId, *TaskT, EKind, CodeGen, /*Tied=*/true,
5328 Data.NumberOfParts);
5329 llvm::APInt TrueOrFalse(32, S.hasClausesOfKind<OMPNowaitClause>() ? 1 : 0);
5330 IntegerLiteral IfCond(getContext(), TrueOrFalse,
5331 getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
5332 SourceLocation());
5333 CGM.getOpenMPRuntime().emitTaskCall(*this, S.getBeginLoc(), S, OutlinedFn,
5334 SharedsTy, CapturedStruct, &IfCond, Data);
5337 void CodeGenFunction::processInReduction(const OMPExecutableDirective &S,
5338 OMPTaskDataTy &Data,
5339 CodeGenFunction &CGF,
5340 const CapturedStmt *CS,
5341 OMPPrivateScope &Scope) {
5342 OpenMPDirectiveKind EKind = getEffectiveDirectiveKind(S);
5343 if (Data.Reductions) {
5344 OpenMPDirectiveKind CapturedRegion = EKind;
5345 OMPLexicalScope LexScope(CGF, S, CapturedRegion);
5346 ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionVars,
5347 Data.ReductionCopies, Data.ReductionOps);
5348 llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
5349 CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(4)));
5350 for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
5351 RedCG.emitSharedOrigLValue(CGF, Cnt);
5352 RedCG.emitAggregateType(CGF, Cnt);
5353       // FIXME: This must be removed once the runtime library is fixed.
5354 // Emit required threadprivate variables for
5355 // initializer/combiner/finalizer.
5356 CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
5357 RedCG, Cnt);
5358 Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
5359 CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
5360 Replacement = Address(
5361 CGF.EmitScalarConversion(Replacement.emitRawPointer(CGF),
5362 CGF.getContext().VoidPtrTy,
5363 CGF.getContext().getPointerType(
5364 Data.ReductionCopies[Cnt]->getType()),
5365 Data.ReductionCopies[Cnt]->getExprLoc()),
5366 CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()),
5367 Replacement.getAlignment());
5368 Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
5369 Scope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
5372 (void)Scope.Privatize();
5373 SmallVector<const Expr *, 4> InRedVars;
5374 SmallVector<const Expr *, 4> InRedPrivs;
5375 SmallVector<const Expr *, 4> InRedOps;
5376 SmallVector<const Expr *, 4> TaskgroupDescriptors;
5377 for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
5378 auto IPriv = C->privates().begin();
5379 auto IRed = C->reduction_ops().begin();
5380 auto ITD = C->taskgroup_descriptors().begin();
5381 for (const Expr *Ref : C->varlist()) {
5382 InRedVars.emplace_back(Ref);
5383 InRedPrivs.emplace_back(*IPriv);
5384 InRedOps.emplace_back(*IRed);
5385 TaskgroupDescriptors.emplace_back(*ITD);
5386 std::advance(IPriv, 1);
5387 std::advance(IRed, 1);
5388 std::advance(ITD, 1);
5391 OMPPrivateScope InRedScope(CGF);
5392 if (!InRedVars.empty()) {
5393 ReductionCodeGen RedCG(InRedVars, InRedVars, InRedPrivs, InRedOps);
5394 for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
5395 RedCG.emitSharedOrigLValue(CGF, Cnt);
5396 RedCG.emitAggregateType(CGF, Cnt);
5397       // FIXME: This must be removed once the runtime library is fixed.
5398 // Emit required threadprivate variables for
5399 // initializer/combiner/finalizer.
5400 CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
5401 RedCG, Cnt);
5402 llvm::Value *ReductionsPtr;
5403 if (const Expr *TRExpr = TaskgroupDescriptors[Cnt]) {
5404 ReductionsPtr =
5405 CGF.EmitLoadOfScalar(CGF.EmitLValue(TRExpr), TRExpr->getExprLoc());
5406 } else {
5407 ReductionsPtr = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
5409 Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
5410 CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
5411 Replacement = Address(
5412 CGF.EmitScalarConversion(
5413 Replacement.emitRawPointer(CGF), CGF.getContext().VoidPtrTy,
5414 CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
5415 InRedPrivs[Cnt]->getExprLoc()),
5416 CGF.ConvertTypeForMem(InRedPrivs[Cnt]->getType()),
5417 Replacement.getAlignment());
5418 Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
5419 InRedScope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
5422 (void)InRedScope.Privatize();
5425 void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
5426 // Emit outlined function for task construct.
5427 const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
5428 Address CapturedStruct = GenerateCapturedStmtArgument(*CS);
5429 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
5430 const Expr *IfCond = nullptr;
5431 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
5432 if (C->getNameModifier() == OMPD_unknown ||
5433 C->getNameModifier() == OMPD_task) {
5434 IfCond = C->getCondition();
5435 break;
5439 OMPTaskDataTy Data;
5440 // Check if we should emit tied or untied task.
5441 Data.Tied = !S.getSingleClause<OMPUntiedClause>();
5442 auto &&BodyGen = [CS](CodeGenFunction &CGF, PrePostActionTy &) {
5443 CGF.EmitStmt(CS->getCapturedStmt());
5445 auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
5446 IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn,
5447 const OMPTaskDataTy &Data) {
5448 CGF.CGM.getOpenMPRuntime().emitTaskCall(CGF, S.getBeginLoc(), S, OutlinedFn,
5449 SharedsTy, CapturedStruct, IfCond,
5450 Data);
5452 auto LPCRegion =
5453 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
5454   EmitOMPTaskBasedDirective(S, OMPD_task, BodyGen, TaskGen, Data);
5455 }
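// Example (hypothetical condition) of the plumbing above:
//   #pragma omp task if(depth < 8) untied
//   { work(); }
// sets Data.Tied to false and passes 'depth < 8' to emitTaskCall; when the
// condition is false at run time the task runs undeferred on the encountering
// thread instead of being enqueued.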
5457 void CodeGenFunction::EmitOMPTaskyieldDirective(
5458 const OMPTaskyieldDirective &S) {
5459 CGM.getOpenMPRuntime().emitTaskyieldCall(*this, S.getBeginLoc());
5462 void CodeGenFunction::EmitOMPErrorDirective(const OMPErrorDirective &S) {
5463 const OMPMessageClause *MC = S.getSingleClause<OMPMessageClause>();
5464 Expr *ME = MC ? MC->getMessageString() : nullptr;
5465 const OMPSeverityClause *SC = S.getSingleClause<OMPSeverityClause>();
5466 bool IsFatal = false;
5467 if (!SC || SC->getSeverityKind() == OMPC_SEVERITY_fatal)
5468 IsFatal = true;
5469   CGM.getOpenMPRuntime().emitErrorCall(*this, S.getBeginLoc(), ME, IsFatal);
5470 }
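// Example: '#pragma omp error at(execution) severity(warning) message("m")'
// reaches emitErrorCall with IsFatal == false; with no 'severity' clause the
// default selected above is fatal.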
5472 void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
5473 CGM.getOpenMPRuntime().emitBarrierCall(*this, S.getBeginLoc(), OMPD_barrier);
5476 void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
5477 OMPTaskDataTy Data;
5478   // Build the list of dependences.
5479   buildDependences(S, Data);
5480   Data.HasNowaitClause = S.hasClausesOfKind<OMPNowaitClause>();
5481   CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc(), Data);
5482 }
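// Example: a bare '#pragma omp taskwait' waits for all child tasks (via
// __kmpc_omp_taskwait), while 'taskwait depend(in: x)' passes the dependence
// list built above so the runtime waits only for the matching tasks.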
5484 static bool isSupportedByOpenMPIRBuilder(const OMPTaskgroupDirective &T) {
5485 return T.clauses().empty();
5488 void CodeGenFunction::EmitOMPTaskgroupDirective(
5489 const OMPTaskgroupDirective &S) {
5490 OMPLexicalScope Scope(*this, S, OMPD_unknown);
5491 if (CGM.getLangOpts().OpenMPIRBuilder && isSupportedByOpenMPIRBuilder(S)) {
5492 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
5493 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
5494 InsertPointTy AllocaIP(AllocaInsertPt->getParent(),
5495 AllocaInsertPt->getIterator());
5497 auto BodyGenCB = [&, this](InsertPointTy AllocaIP,
5498 InsertPointTy CodeGenIP) {
5499 Builder.restoreIP(CodeGenIP);
5500 EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
5501 return llvm::Error::success();
5503 CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
5504 if (!CapturedStmtInfo)
5505 CapturedStmtInfo = &CapStmtInfo;
5506 llvm::OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
5507 OMPBuilder.createTaskgroup(Builder, AllocaIP, BodyGenCB);
5508 assert(AfterIP && "unexpected error creating taskgroup");
5509 Builder.restoreIP(*AfterIP);
5510     return;
5511   }
5512 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
5513 Action.Enter(CGF);
5514 if (const Expr *E = S.getReductionRef()) {
5515 SmallVector<const Expr *, 4> LHSs;
5516 SmallVector<const Expr *, 4> RHSs;
5517 OMPTaskDataTy Data;
5518 for (const auto *C : S.getClausesOfKind<OMPTaskReductionClause>()) {
5519 Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
5520 Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
5521 Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
5522 Data.ReductionOps.append(C->reduction_ops().begin(),
5523 C->reduction_ops().end());
5524 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
5525 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
5527 llvm::Value *ReductionDesc =
5528 CGF.CGM.getOpenMPRuntime().emitTaskReductionInit(CGF, S.getBeginLoc(),
5529 LHSs, RHSs, Data);
5530 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
5531 CGF.EmitVarDecl(*VD);
5532 CGF.EmitStoreOfScalar(ReductionDesc, CGF.GetAddrOfLocalVar(VD),
5533 /*Volatile=*/false, E->getType());
5535 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
5537 CGM.getOpenMPRuntime().emitTaskgroupRegion(*this, CodeGen, S.getBeginLoc());
5540 void CodeGenFunction::EmitOMPFlushDirective(const OMPFlushDirective &S) {
5541 llvm::AtomicOrdering AO = S.getSingleClause<OMPFlushClause>()
5542 ? llvm::AtomicOrdering::NotAtomic
5543 : llvm::AtomicOrdering::AcquireRelease;
5544 CGM.getOpenMPRuntime().emitFlush(
5545 *this,
5546 [&S]() -> ArrayRef<const Expr *> {
5547 if (const auto *FlushClause = S.getSingleClause<OMPFlushClause>())
5548 return llvm::ArrayRef(FlushClause->varlist_begin(),
5549 FlushClause->varlist_end());
5550 return {};
5551 }(),
5552       S.getBeginLoc(), AO);
5553 }
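// Example: a bare '#pragma omp flush' is emitted with acquire-release
// ordering, while the list form 'flush(x, y)' selects NotAtomic above, since
// the list variant is not given atomic ordering here.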
5555 void CodeGenFunction::EmitOMPDepobjDirective(const OMPDepobjDirective &S) {
5556 const auto *DO = S.getSingleClause<OMPDepobjClause>();
5557 LValue DOLVal = EmitLValue(DO->getDepobj());
5558 if (const auto *DC = S.getSingleClause<OMPDependClause>()) {
5559     // Build the list of dependences and emit them.
5560 OMPTaskDataTy Data;
5561 buildDependences(S, Data);
5562 for (auto &Dep : Data.Dependences) {
5563 Address DepAddr = CGM.getOpenMPRuntime().emitDepobjDependClause(
5564 *this, Dep, DC->getBeginLoc());
5565 EmitStoreOfScalar(DepAddr.emitRawPointer(*this), DOLVal);
5567 return;
5569 if (const auto *DC = S.getSingleClause<OMPDestroyClause>()) {
5570 CGM.getOpenMPRuntime().emitDestroyClause(*this, DOLVal, DC->getBeginLoc());
5571 return;
5573 if (const auto *UC = S.getSingleClause<OMPUpdateClause>()) {
5574 CGM.getOpenMPRuntime().emitUpdateClause(
5575 *this, DOLVal, UC->getDependencyKind(), UC->getBeginLoc());
5576     return;
5577   }
5578 }
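// Example: the three mutually exclusive forms handled above are
//   #pragma omp depobj(o) depend(inout: x) // allocate and store a dependence
//   #pragma omp depobj(o) update(in)       // rewrite the stored kind
//   #pragma omp depobj(o) destroy          // release the storage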
5580 void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) {
5581 if (!OMPParentLoopDirectiveForScan)
5582 return;
5583 const OMPExecutableDirective &ParentDir = *OMPParentLoopDirectiveForScan;
5584 bool IsInclusive = S.hasClausesOfKind<OMPInclusiveClause>();
5585 SmallVector<const Expr *, 4> Shareds;
5586 SmallVector<const Expr *, 4> Privates;
5587 SmallVector<const Expr *, 4> LHSs;
5588 SmallVector<const Expr *, 4> RHSs;
5589 SmallVector<const Expr *, 4> ReductionOps;
5590 SmallVector<const Expr *, 4> CopyOps;
5591 SmallVector<const Expr *, 4> CopyArrayTemps;
5592 SmallVector<const Expr *, 4> CopyArrayElems;
5593 for (const auto *C : ParentDir.getClausesOfKind<OMPReductionClause>()) {
5594 if (C->getModifier() != OMPC_REDUCTION_inscan)
5595 continue;
5596 Shareds.append(C->varlist_begin(), C->varlist_end());
5597 Privates.append(C->privates().begin(), C->privates().end());
5598 LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
5599 RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
5600 ReductionOps.append(C->reduction_ops().begin(), C->reduction_ops().end());
5601 CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
5602 CopyArrayTemps.append(C->copy_array_temps().begin(),
5603 C->copy_array_temps().end());
5604 CopyArrayElems.append(C->copy_array_elems().begin(),
5605 C->copy_array_elems().end());
5607 if (ParentDir.getDirectiveKind() == OMPD_simd ||
5608 (getLangOpts().OpenMPSimd &&
5609 isOpenMPSimdDirective(ParentDir.getDirectiveKind()))) {
5610 // For the simd directive and simd-based directives in simd-only mode, use the
5611 // following codegen:
5612 // int x = 0;
5613 // #pragma omp simd reduction(inscan, +: x)
5614 // for (..) {
5615 // <first part>
5616 // #pragma omp scan inclusive(x)
5617 // <second part>
5618 // }
5619 // is transformed to:
5620 // int x = 0;
5621 // for (..) {
5622 // int x_priv = 0;
5623 // <first part>
5624 // x = x_priv + x;
5625 // x_priv = x;
5626 // <second part>
5627 // }
5628 // and
5629 // int x = 0;
5630 // #pragma omp simd reduction(inscan, +: x)
5631 // for (..) {
5632 // <first part>
5633 // #pragma omp scan exclusive(x)
5634 // <second part>
5635 // }
5636 // to
5637 // int x = 0;
5638 // for (..) {
5639 // int x_priv = 0;
5640 // <second part>
5641 // int temp = x;
5642 // x = x_priv + x;
5643 // x_priv = temp;
5644 // <first part>
5645 // }
5646 llvm::BasicBlock *OMPScanReduce = createBasicBlock("omp.inscan.reduce");
5647 EmitBranch(IsInclusive
5648 ? OMPScanReduce
5649 : BreakContinueStack.back().ContinueBlock.getBlock());
5650 EmitBlock(OMPScanDispatch);
5652 // New scope for correct construction/destruction of temp variables for
5653 // exclusive scan.
5654 LexicalScope Scope(*this, S.getSourceRange());
5655 EmitBranch(IsInclusive ? OMPBeforeScanBlock : OMPAfterScanBlock);
5656 EmitBlock(OMPScanReduce);
5657 if (!IsInclusive) {
5658 // Create temp var and copy LHS value to this temp value.
5659 // TMP = LHS;
5660 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
5661 const Expr *PrivateExpr = Privates[I];
5662 const Expr *TempExpr = CopyArrayTemps[I];
5663 EmitAutoVarDecl(
5664 *cast<VarDecl>(cast<DeclRefExpr>(TempExpr)->getDecl()));
5665 LValue DestLVal = EmitLValue(TempExpr);
5666 LValue SrcLVal = EmitLValue(LHSs[I]);
5667 EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(),
5668 SrcLVal.getAddress(),
5669 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
5670 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
5671 CopyOps[I]);
5674 CGM.getOpenMPRuntime().emitReduction(
5675 *this, ParentDir.getEndLoc(), Privates, LHSs, RHSs, ReductionOps,
5676 {/*WithNowait=*/true, /*SimpleReduction=*/true, OMPD_simd});
5677 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
5678 const Expr *PrivateExpr = Privates[I];
5679 LValue DestLVal;
5680 LValue SrcLVal;
5681 if (IsInclusive) {
5682 DestLVal = EmitLValue(RHSs[I]);
5683 SrcLVal = EmitLValue(LHSs[I]);
5684 } else {
5685 const Expr *TempExpr = CopyArrayTemps[I];
5686 DestLVal = EmitLValue(RHSs[I]);
5687 SrcLVal = EmitLValue(TempExpr);
5689 EmitOMPCopy(
5690 PrivateExpr->getType(), DestLVal.getAddress(), SrcLVal.getAddress(),
5691 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
5692 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), CopyOps[I]);
5695 EmitBranch(IsInclusive ? OMPAfterScanBlock : OMPBeforeScanBlock);
5696 OMPScanExitBlock = IsInclusive
5697 ? BreakContinueStack.back().ContinueBlock.getBlock()
5698 : OMPScanReduce;
5699 EmitBlock(OMPAfterScanBlock);
5700 return;
5702 if (!IsInclusive) {
5703 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
5704 EmitBlock(OMPScanExitBlock);
5706 if (OMPFirstScanLoop) {
5707 // Emit buffer[i] = red; at the end of the input phase.
5708 const auto *IVExpr = cast<OMPLoopDirective>(ParentDir)
5709 .getIterationVariable()
5710 ->IgnoreParenImpCasts();
5711 LValue IdxLVal = EmitLValue(IVExpr);
5712 llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc());
5713 IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false);
5714 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
5715 const Expr *PrivateExpr = Privates[I];
5716 const Expr *OrigExpr = Shareds[I];
5717 const Expr *CopyArrayElem = CopyArrayElems[I];
5718 OpaqueValueMapping IdxMapping(
5719 *this,
5720 cast<OpaqueValueExpr>(
5721 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
5722 RValue::get(IdxVal));
5723 LValue DestLVal = EmitLValue(CopyArrayElem);
5724 LValue SrcLVal = EmitLValue(OrigExpr);
5725 EmitOMPCopy(
5726 PrivateExpr->getType(), DestLVal.getAddress(), SrcLVal.getAddress(),
5727 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
5728 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), CopyOps[I]);
5731 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
5732 if (IsInclusive) {
5733 EmitBlock(OMPScanExitBlock);
5734 EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
5736 EmitBlock(OMPScanDispatch);
5737 if (!OMPFirstScanLoop) {
5738 // Emit red = buffer[i]; at the entrance to the scan phase.
5739 const auto *IVExpr = cast<OMPLoopDirective>(ParentDir)
5740 .getIterationVariable()
5741 ->IgnoreParenImpCasts();
5742 LValue IdxLVal = EmitLValue(IVExpr);
5743 llvm::Value *IdxVal = EmitLoadOfScalar(IdxLVal, IVExpr->getExprLoc());
5744 IdxVal = Builder.CreateIntCast(IdxVal, SizeTy, /*isSigned=*/false);
5745 llvm::BasicBlock *ExclusiveExitBB = nullptr;
5746 if (!IsInclusive) {
5747 llvm::BasicBlock *ContBB = createBasicBlock("omp.exclusive.dec");
5748 ExclusiveExitBB = createBasicBlock("omp.exclusive.copy.exit");
5749 llvm::Value *Cmp = Builder.CreateIsNull(IdxVal);
5750 Builder.CreateCondBr(Cmp, ExclusiveExitBB, ContBB);
5751 EmitBlock(ContBB);
5752 // Use idx - 1 iteration for exclusive scan.
5753 IdxVal = Builder.CreateNUWSub(IdxVal, llvm::ConstantInt::get(SizeTy, 1));
5755 for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
5756 const Expr *PrivateExpr = Privates[I];
5757 const Expr *OrigExpr = Shareds[I];
5758 const Expr *CopyArrayElem = CopyArrayElems[I];
5759 OpaqueValueMapping IdxMapping(
5760 *this,
5761 cast<OpaqueValueExpr>(
5762 cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
5763 RValue::get(IdxVal));
5764 LValue SrcLVal = EmitLValue(CopyArrayElem);
5765 LValue DestLVal = EmitLValue(OrigExpr);
5766 EmitOMPCopy(
5767 PrivateExpr->getType(), DestLVal.getAddress(), SrcLVal.getAddress(),
5768 cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
5769 cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), CopyOps[I]);
5771 if (!IsInclusive) {
5772 EmitBlock(ExclusiveExitBB);
5775 EmitBranch((OMPFirstScanLoop == IsInclusive) ? OMPBeforeScanBlock
5776 : OMPAfterScanBlock);
5777 EmitBlock(OMPAfterScanBlock);
5780 void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
5781 const CodeGenLoopTy &CodeGenLoop,
5782 Expr *IncExpr) {
5783 // Emit the loop iteration variable.
5784 const auto *IVExpr = cast<DeclRefExpr>(S.getIterationVariable());
5785 const auto *IVDecl = cast<VarDecl>(IVExpr->getDecl());
5786 EmitVarDecl(*IVDecl);
5788 // Emit the iterations count variable.
5789 // If it is not a variable, Sema decided to calculate the iteration count on
5790 // each iteration (e.g., it is foldable into a constant).
5791 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
5792 EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
5793 // Emit calculation of the iterations count.
5794 EmitIgnoredExpr(S.getCalcLastIteration());
5797 CGOpenMPRuntime &RT = CGM.getOpenMPRuntime();
5799 bool HasLastprivateClause = false;
5800 // Check pre-condition.
5802 OMPLoopScope PreInitScope(*this, S);
5803 // Skip the entire loop if we don't meet the precondition.
5804 // If the condition constant folds and can be elided, avoid emitting the
5805 // whole loop.
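// For example (illustrative): if the associated loop is
//   for (int i = 0; i < 0; ++i) ...
// the precondition constant-folds to false and no loop code is emitted.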
5806 bool CondConstant;
5807 llvm::BasicBlock *ContBlock = nullptr;
5808 if (ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
5809 if (!CondConstant)
5810 return;
5811 } else {
5812 llvm::BasicBlock *ThenBlock = createBasicBlock("omp.precond.then");
5813 ContBlock = createBasicBlock("omp.precond.end");
5814 emitPreCond(*this, S, S.getPreCond(), ThenBlock, ContBlock,
5815 getProfileCount(&S));
5816 EmitBlock(ThenBlock);
5817 incrementProfileCounter(&S);
5820 emitAlignedClause(*this, S);
5821 // Emit 'then' code.
5823 // Emit helper vars inits.
5825 LValue LB = EmitOMPHelperVar(
5826 *this, cast<DeclRefExpr>(
5827 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
5828 ? S.getCombinedLowerBoundVariable()
5829 : S.getLowerBoundVariable())));
5830 LValue UB = EmitOMPHelperVar(
5831 *this, cast<DeclRefExpr>(
5832 (isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
5833 ? S.getCombinedUpperBoundVariable()
5834 : S.getUpperBoundVariable())));
5835 LValue ST =
5836 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getStrideVariable()));
5837 LValue IL =
5838 EmitOMPHelperVar(*this, cast<DeclRefExpr>(S.getIsLastIterVariable()));
5840 OMPPrivateScope LoopScope(*this);
5841 if (EmitOMPFirstprivateClause(S, LoopScope)) {
5842 // Emit implicit barrier to synchronize threads and avoid data races
5843 // on initialization of firstprivate variables and post-update of
5844 // lastprivate variables.
5845 CGM.getOpenMPRuntime().emitBarrierCall(
5846 *this, S.getBeginLoc(), OMPD_unknown, /*EmitChecks=*/false,
5847 /*ForceSimpleCall=*/true);
5849 EmitOMPPrivateClause(S, LoopScope);
5850 if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
5851 !isOpenMPParallelDirective(S.getDirectiveKind()) &&
5852 !isOpenMPTeamsDirective(S.getDirectiveKind()))
5853 EmitOMPReductionClauseInit(S, LoopScope);
5854 HasLastprivateClause = EmitOMPLastprivateClauseInit(S, LoopScope);
5855 EmitOMPPrivateLoopCounters(S, LoopScope);
5856 (void)LoopScope.Privatize();
5857 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
5858 CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(*this, S);
5860 // Detect the distribute schedule kind and chunk.
5861 llvm::Value *Chunk = nullptr;
5862 OpenMPDistScheduleClauseKind ScheduleKind = OMPC_DIST_SCHEDULE_unknown;
5863 if (const auto *C = S.getSingleClause<OMPDistScheduleClause>()) {
5864 ScheduleKind = C->getDistScheduleKind();
5865 if (const Expr *Ch = C->getChunkSize()) {
5866 Chunk = EmitScalarExpr(Ch);
5867 Chunk = EmitScalarConversion(Chunk, Ch->getType(),
5868 S.getIterationVariable()->getType(),
5869 S.getBeginLoc());
5871 } else {
5872 // Default behavior for the dist_schedule clause.
5873 CGM.getOpenMPRuntime().getDefaultDistScheduleAndChunk(
5874 *this, S, ScheduleKind, Chunk);
5876 const unsigned IVSize = getContext().getTypeSize(IVExpr->getType());
5877 const bool IVSigned = IVExpr->getType()->hasSignedIntegerRepresentation();
5879 // OpenMP [2.10.8, distribute Construct, Description]
5880 // If dist_schedule is specified, kind must be static. If specified,
5881 // iterations are divided into chunks of size chunk_size, chunks are
5882 // assigned to the teams of the league in a round-robin fashion in the
5883 // order of the team number. When no chunk_size is specified, the
5884 // iteration space is divided into chunks that are approximately equal
5885 // in size, and at most one chunk is distributed to each team of the
5886 // league. The size of the chunks is unspecified in this case.
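// Illustrative example (not part of the quoted spec text): a directive like
//
//   #pragma omp distribute dist_schedule(static, 4)
//   for (int i = 0; i < N; ++i) ...
//
// provides an explicit chunk size, while 'dist_schedule(static)' (or no
// clause at all) selects the unchunked static path below.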
5887 bool StaticChunked =
5888 RT.isStaticChunked(ScheduleKind, /* Chunked */ Chunk != nullptr) &&
5889 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind());
5890 if (RT.isStaticNonchunked(ScheduleKind,
5891 /* Chunked */ Chunk != nullptr) ||
5892 StaticChunked) {
5893 CGOpenMPRuntime::StaticRTInput StaticInit(
5894 IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(),
5895 LB.getAddress(), UB.getAddress(), ST.getAddress(),
5896 StaticChunked ? Chunk : nullptr);
5897 RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind,
5898 StaticInit);
5899 JumpDest LoopExit =
5900 getJumpDestInCurrentScope(createBasicBlock("omp.loop.exit"));
5901 // UB = min(UB, GlobalUB);
5902 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
5903 ? S.getCombinedEnsureUpperBound()
5904 : S.getEnsureUpperBound());
5905 // IV = LB;
5906 EmitIgnoredExpr(isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
5907 ? S.getCombinedInit()
5908 : S.getInit());
5910 const Expr *Cond =
5911 isOpenMPLoopBoundSharingDirective(S.getDirectiveKind())
5912 ? S.getCombinedCond()
5913 : S.getCond();
5915 if (StaticChunked)
5916 Cond = S.getCombinedDistCond();
5918 // For static unchunked schedules, generate:
5920 // 1. For distribute alone, codegen
5921 // while (idx <= UB) {
5922 // BODY;
5923 // ++idx;
5924 // }
5926 // 2. When combined with 'for' (e.g. as in 'distribute parallel for')
5927 // while (idx <= UB) {
5928 // <CodeGen rest of pragma>(LB, UB);
5929 // idx += ST;
5930 // }
5932 // For a static chunked schedule (chunk size specified), generate:
5934 // while (IV <= GlobalUB) {
5935 // <CodeGen rest of pragma>(LB, UB);
5936 // LB += ST;
5937 // UB += ST;
5938 // UB = min(UB, GlobalUB);
5939 // IV = LB;
5940 // }
5942 emitCommonSimdLoop(
5943 *this, S,
5944 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
5945 if (isOpenMPSimdDirective(S.getDirectiveKind()))
5946 CGF.EmitOMPSimdInit(S);
5948 [&S, &LoopScope, Cond, IncExpr, LoopExit, &CodeGenLoop,
5949 StaticChunked](CodeGenFunction &CGF, PrePostActionTy &) {
5950 CGF.EmitOMPInnerLoop(
5951 S, LoopScope.requiresCleanups(), Cond, IncExpr,
5952 [&S, LoopExit, &CodeGenLoop](CodeGenFunction &CGF) {
5953 CodeGenLoop(CGF, S, LoopExit);
5955 [&S, StaticChunked](CodeGenFunction &CGF) {
5956 if (StaticChunked) {
5957 CGF.EmitIgnoredExpr(S.getCombinedNextLowerBound());
5958 CGF.EmitIgnoredExpr(S.getCombinedNextUpperBound());
5959 CGF.EmitIgnoredExpr(S.getCombinedEnsureUpperBound());
5960 CGF.EmitIgnoredExpr(S.getCombinedInit());
5964 EmitBlock(LoopExit.getBlock());
5965 // Tell the runtime we are done.
5966 RT.emitForStaticFinish(*this, S.getEndLoc(), OMPD_distribute);
5967 } else {
5968 // Emit the outer loop, which requests its work chunk [LB..UB] from the
5969 // runtime and runs the inner loop to process it.
5970 const OMPLoopArguments LoopArguments = {
5971 LB.getAddress(), UB.getAddress(), ST.getAddress(), IL.getAddress(),
5972 Chunk};
5973 EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments,
5974 CodeGenLoop);
5976 if (isOpenMPSimdDirective(S.getDirectiveKind())) {
5977 EmitOMPSimdFinal(S, [IL, &S](CodeGenFunction &CGF) {
5978 return CGF.Builder.CreateIsNotNull(
5979 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
5982 if (isOpenMPSimdDirective(S.getDirectiveKind()) &&
5983 !isOpenMPParallelDirective(S.getDirectiveKind()) &&
5984 !isOpenMPTeamsDirective(S.getDirectiveKind())) {
5985 EmitOMPReductionClauseFinal(S, OMPD_simd);
5986 // Emit post-update of the reduction variables if IsLastIter != 0.
5987 emitPostUpdateForReductionClause(
5988 *this, S, [IL, &S](CodeGenFunction &CGF) {
5989 return CGF.Builder.CreateIsNotNull(
5990 CGF.EmitLoadOfScalar(IL, S.getBeginLoc()));
5993 // Emit final copy of the lastprivate variables if IsLastIter != 0.
5994 if (HasLastprivateClause) {
5995 EmitOMPLastprivateClauseFinal(
5996 S, /*NoFinals=*/false,
5997 Builder.CreateIsNotNull(EmitLoadOfScalar(IL, S.getBeginLoc())));
6001 // We're now done with the loop, so jump to the continuation block.
6002 if (ContBlock) {
6003 EmitBranch(ContBlock);
6004 EmitBlock(ContBlock, true);
6009 // Pass OMPLoopDirective (instead of OMPDistributeDirective) to make this
6010 // function available for "loop bind(teams)", which maps to "distribute".
6011 static void emitOMPDistributeDirective(const OMPLoopDirective &S,
6012 CodeGenFunction &CGF,
6013 CodeGenModule &CGM) {
6014 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
6015 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
6017 OMPLexicalScope Scope(CGF, S, OMPD_unknown);
6018 CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute, CodeGen);
6021 void CodeGenFunction::EmitOMPDistributeDirective(
6022 const OMPDistributeDirective &S) {
6023 emitOMPDistributeDirective(S, *this, CGM);
6026 static llvm::Function *emitOutlinedOrderedFunction(CodeGenModule &CGM,
6027 const CapturedStmt *S,
6028 SourceLocation Loc) {
6029 CodeGenFunction CGF(CGM, /*suppressNewContext=*/true);
6030 CodeGenFunction::CGCapturedStmtInfo CapStmtInfo;
6031 CGF.CapturedStmtInfo = &CapStmtInfo;
6032 llvm::Function *Fn = CGF.GenerateOpenMPCapturedStmtFunction(*S, Loc);
6033 Fn->setDoesNotRecurse();
6034 return Fn;
6037 template <typename T>
6038 static void emitRestoreIP(CodeGenFunction &CGF, const T *C,
6039 llvm::OpenMPIRBuilder::InsertPointTy AllocaIP,
6040 llvm::OpenMPIRBuilder &OMPBuilder) {
6042 unsigned NumLoops = C->getNumLoops();
6043 QualType Int64Ty = CGF.CGM.getContext().getIntTypeForBitwidth(
6044 /*DestWidth=*/64, /*Signed=*/1);
6045 llvm::SmallVector<llvm::Value *> StoreValues;
6046 for (unsigned I = 0; I < NumLoops; I++) {
6047 const Expr *CounterVal = C->getLoopData(I);
6048 assert(CounterVal);
6049 llvm::Value *StoreValue = CGF.EmitScalarConversion(
6050 CGF.EmitScalarExpr(CounterVal), CounterVal->getType(), Int64Ty,
6051 CounterVal->getExprLoc());
6052 StoreValues.emplace_back(StoreValue);
6054 OMPDoacrossKind<T> ODK;
6055 bool IsDependSource = ODK.isSource(C);
6056 CGF.Builder.restoreIP(
6057 OMPBuilder.createOrderedDepend(CGF.Builder, AllocaIP, NumLoops,
6058 StoreValues, ".cnt.addr", IsDependSource));
6061 void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
6062 if (CGM.getLangOpts().OpenMPIRBuilder) {
6063 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
6064 using InsertPointTy = llvm::OpenMPIRBuilder::InsertPointTy;
6066 if (S.hasClausesOfKind<OMPDependClause>() ||
6067 S.hasClausesOfKind<OMPDoacrossClause>()) {
6068 // The ordered directive with depend clause.
6069 assert(!S.hasAssociatedStmt() && "An ordered depend|doacross construct "
6070 "must not have an associated statement.");
6071 InsertPointTy AllocaIP(AllocaInsertPt->getParent(),
6072 AllocaInsertPt->getIterator());
6073 for (const auto *DC : S.getClausesOfKind<OMPDependClause>())
6074 emitRestoreIP(*this, DC, AllocaIP, OMPBuilder);
6075 for (const auto *DC : S.getClausesOfKind<OMPDoacrossClause>())
6076 emitRestoreIP(*this, DC, AllocaIP, OMPBuilder);
6077 } else {
6078 // The ordered directive with a threads or simd clause, or without any clause.
6079 // Without a clause, it behaves as if the threads clause were specified.
6080 const auto *C = S.getSingleClause<OMPSIMDClause>();
6082 auto FiniCB = [this](InsertPointTy IP) {
6083 OMPBuilderCBHelpers::FinalizeOMPRegion(*this, IP);
6084 return llvm::Error::success();
6087 auto BodyGenCB = [&S, C, this](InsertPointTy AllocaIP,
6088 InsertPointTy CodeGenIP) {
6089 Builder.restoreIP(CodeGenIP);
6091 const CapturedStmt *CS = S.getInnermostCapturedStmt();
6092 if (C) {
6093 llvm::BasicBlock *FiniBB = splitBBWithSuffix(
6094 Builder, /*CreateBranch=*/false, ".ordered.after");
6095 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
6096 GenerateOpenMPCapturedVars(*CS, CapturedVars);
6097 llvm::Function *OutlinedFn =
6098 emitOutlinedOrderedFunction(CGM, CS, S.getBeginLoc());
6099 assert(S.getBeginLoc().isValid() &&
6100 "Outlined function call location must be valid.");
6101 ApplyDebugLocation::CreateDefaultArtificial(*this, S.getBeginLoc());
6102 OMPBuilderCBHelpers::EmitCaptureStmt(*this, CodeGenIP, *FiniBB,
6103 OutlinedFn, CapturedVars);
6104 } else {
6105 OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
6106 *this, CS->getCapturedStmt(), AllocaIP, CodeGenIP, "ordered");
6108 return llvm::Error::success();
6111 OMPLexicalScope Scope(*this, S, OMPD_unknown);
6112 llvm::OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
6113 OMPBuilder.createOrderedThreadsSimd(Builder, BodyGenCB, FiniCB, !C);
6114 assert(AfterIP && "unexpected error creating ordered");
6115 Builder.restoreIP(*AfterIP);
6117 return;
6120 if (S.hasClausesOfKind<OMPDependClause>()) {
6121 assert(!S.hasAssociatedStmt() &&
6122 "An ordered depend construct must not have an associated statement.");
6123 for (const auto *DC : S.getClausesOfKind<OMPDependClause>())
6124 CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC);
6125 return;
6127 if (S.hasClausesOfKind<OMPDoacrossClause>()) {
6128 assert(!S.hasAssociatedStmt() &&
6129 "An ordered doacross construct must not have an associated statement.");
6130 for (const auto *DC : S.getClausesOfKind<OMPDoacrossClause>())
6131 CGM.getOpenMPRuntime().emitDoacrossOrdered(*this, DC);
6132 return;
6134 const auto *C = S.getSingleClause<OMPSIMDClause>();
6135 auto &&CodeGen = [&S, C, this](CodeGenFunction &CGF,
6136 PrePostActionTy &Action) {
6137 const CapturedStmt *CS = S.getInnermostCapturedStmt();
6138 if (C) {
6139 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
6140 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
6141 llvm::Function *OutlinedFn =
6142 emitOutlinedOrderedFunction(CGM, CS, S.getBeginLoc());
6143 CGM.getOpenMPRuntime().emitOutlinedFunctionCall(CGF, S.getBeginLoc(),
6144 OutlinedFn, CapturedVars);
6145 } else {
6146 Action.Enter(CGF);
6147 CGF.EmitStmt(CS->getCapturedStmt());
6150 OMPLexicalScope Scope(*this, S, OMPD_unknown);
6151 CGM.getOpenMPRuntime().emitOrderedRegion(*this, CodeGen, S.getBeginLoc(), !C);
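// A hypothetical doacross loop handled by the depend/doacross paths above:
//
//   #pragma omp for ordered(1)
//   for (int i = 1; i < N; ++i) {
//     #pragma omp ordered depend(sink : i - 1)
//     a[i] = a[i - 1] + f(i);   // safe: iteration i-1 has signaled
//     #pragma omp ordered depend(source)
//   }
//
// ('a', 'f', and 'N' are placeholders for illustration.)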
6154 static llvm::Value *convertToScalarValue(CodeGenFunction &CGF, RValue Val,
6155 QualType SrcType, QualType DestType,
6156 SourceLocation Loc) {
6157 assert(CGF.hasScalarEvaluationKind(DestType) &&
6158 "DestType must have scalar evaluation kind.");
6159 assert(!Val.isAggregate() && "Must be a scalar or complex.");
6160 return Val.isScalar() ? CGF.EmitScalarConversion(Val.getScalarVal(), SrcType,
6161 DestType, Loc)
6162 : CGF.EmitComplexToScalarConversion(
6163 Val.getComplexVal(), SrcType, DestType, Loc);
6166 static CodeGenFunction::ComplexPairTy
6167 convertToComplexValue(CodeGenFunction &CGF, RValue Val, QualType SrcType,
6168 QualType DestType, SourceLocation Loc) {
6169 assert(CGF.getEvaluationKind(DestType) == TEK_Complex &&
6170 "DestType must have complex evaluation kind.");
6171 CodeGenFunction::ComplexPairTy ComplexVal;
6172 if (Val.isScalar()) {
6173 // Convert the input element to the element type of the complex.
6174 QualType DestElementType =
6175 DestType->castAs<ComplexType>()->getElementType();
6176 llvm::Value *ScalarVal = CGF.EmitScalarConversion(
6177 Val.getScalarVal(), SrcType, DestElementType, Loc);
6178 ComplexVal = CodeGenFunction::ComplexPairTy(
6179 ScalarVal, llvm::Constant::getNullValue(ScalarVal->getType()));
6180 } else {
6181 assert(Val.isComplex() && "Must be a scalar or complex.");
6182 QualType SrcElementType = SrcType->castAs<ComplexType>()->getElementType();
6183 QualType DestElementType =
6184 DestType->castAs<ComplexType>()->getElementType();
6185 ComplexVal.first = CGF.EmitScalarConversion(
6186 Val.getComplexVal().first, SrcElementType, DestElementType, Loc);
6187 ComplexVal.second = CGF.EmitScalarConversion(
6188 Val.getComplexVal().second, SrcElementType, DestElementType, Loc);
6190 return ComplexVal;
6193 static void emitSimpleAtomicStore(CodeGenFunction &CGF, llvm::AtomicOrdering AO,
6194 LValue LVal, RValue RVal) {
6195 if (LVal.isGlobalReg())
6196 CGF.EmitStoreThroughGlobalRegLValue(RVal, LVal);
6197 else
6198 CGF.EmitAtomicStore(RVal, LVal, AO, LVal.isVolatile(), /*isInit=*/false);
6201 static RValue emitSimpleAtomicLoad(CodeGenFunction &CGF,
6202 llvm::AtomicOrdering AO, LValue LVal,
6203 SourceLocation Loc) {
6204 if (LVal.isGlobalReg())
6205 return CGF.EmitLoadOfLValue(LVal, Loc);
6206 return CGF.EmitAtomicLoad(
6207 LVal, Loc, llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(AO),
6208 LVal.isVolatile());
6211 void CodeGenFunction::emitOMPSimpleStore(LValue LVal, RValue RVal,
6212 QualType RValTy, SourceLocation Loc) {
6213 switch (getEvaluationKind(LVal.getType())) {
6214 case TEK_Scalar:
6215 EmitStoreThroughLValue(RValue::get(convertToScalarValue(
6216 *this, RVal, RValTy, LVal.getType(), Loc)),
6217 LVal);
6218 break;
6219 case TEK_Complex:
6220 EmitStoreOfComplex(
6221 convertToComplexValue(*this, RVal, RValTy, LVal.getType(), Loc), LVal,
6222 /*isInit=*/false);
6223 break;
6224 case TEK_Aggregate:
6225 llvm_unreachable("Must be a scalar or complex.");
6229 static void emitOMPAtomicReadExpr(CodeGenFunction &CGF, llvm::AtomicOrdering AO,
6230 const Expr *X, const Expr *V,
6231 SourceLocation Loc) {
6232 // v = x;
6233 assert(V->isLValue() && "V of 'omp atomic read' is not lvalue");
6234 assert(X->isLValue() && "X of 'omp atomic read' is not lvalue");
6235 LValue XLValue = CGF.EmitLValue(X);
6236 LValue VLValue = CGF.EmitLValue(V);
6237 RValue Res = emitSimpleAtomicLoad(CGF, AO, XLValue, Loc);
6238 // OpenMP, 2.17.7, atomic Construct
6239 // If the read or capture clause is specified and the acquire, acq_rel, or
6240 // seq_cst clause is specified then the strong flush on exit from the atomic
6241 // operation is also an acquire flush.
6242 switch (AO) {
6243 case llvm::AtomicOrdering::Acquire:
6244 case llvm::AtomicOrdering::AcquireRelease:
6245 case llvm::AtomicOrdering::SequentiallyConsistent:
6246 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, {}, Loc,
6247 llvm::AtomicOrdering::Acquire);
6248 break;
6249 case llvm::AtomicOrdering::Monotonic:
6250 case llvm::AtomicOrdering::Release:
6251 break;
6252 case llvm::AtomicOrdering::NotAtomic:
6253 case llvm::AtomicOrdering::Unordered:
6254 llvm_unreachable("Unexpected ordering.");
6256 CGF.emitOMPSimpleStore(VLValue, Res, X->getType().getNonReferenceType(), Loc);
6257 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V);
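// Illustrative input for the read path (assumed user code):
//
//   #pragma omp atomic read acquire
//   v = x;
//
// The load of 'x' is atomic, and the 'acquire' clause additionally yields the
// acquire flush emitted above.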
6260 static void emitOMPAtomicWriteExpr(CodeGenFunction &CGF,
6261 llvm::AtomicOrdering AO, const Expr *X,
6262 const Expr *E, SourceLocation Loc) {
6263 // x = expr;
6264 assert(X->isLValue() && "X of 'omp atomic write' is not lvalue");
6265 emitSimpleAtomicStore(CGF, AO, CGF.EmitLValue(X), CGF.EmitAnyExpr(E));
6266 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
6267 // OpenMP, 2.17.7, atomic Construct
6268 // If the write, update, or capture clause is specified and the release,
6269 // acq_rel, or seq_cst clause is specified then the strong flush on entry to
6270 // the atomic operation is also a release flush.
6271 switch (AO) {
6272 case llvm::AtomicOrdering::Release:
6273 case llvm::AtomicOrdering::AcquireRelease:
6274 case llvm::AtomicOrdering::SequentiallyConsistent:
6275 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, {}, Loc,
6276 llvm::AtomicOrdering::Release);
6277 break;
6278 case llvm::AtomicOrdering::Acquire:
6279 case llvm::AtomicOrdering::Monotonic:
6280 break;
6281 case llvm::AtomicOrdering::NotAtomic:
6282 case llvm::AtomicOrdering::Unordered:
6283 llvm_unreachable("Unexpected ordering.");
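// Illustrative input for the write path (assumed user code):
//
//   #pragma omp atomic write release
//   x = expr;
//
// The store of 'x' is atomic, and the 'release' clause additionally yields
// the release flush emitted above.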
6287 static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
6288 RValue Update,
6289 BinaryOperatorKind BO,
6290 llvm::AtomicOrdering AO,
6291 bool IsXLHSInRHSPart) {
6292 ASTContext &Context = CGF.getContext();
6293 // Allow atomicrmw only if 'x' and 'update' are integer values, the lvalue
6294 // for the 'x' expression is simple, and atomics are supported for the given
6295 // type on the target platform.
6296 if (BO == BO_Comma || !Update.isScalar() || !X.isSimple() ||
6297 (!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
6298 (Update.getScalarVal()->getType() != X.getAddress().getElementType())) ||
6299 !Context.getTargetInfo().hasBuiltinAtomic(
6300 Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
6301 return std::make_pair(false, RValue::get(nullptr));
6303 auto &&CheckAtomicSupport = [&CGF](llvm::Type *T, BinaryOperatorKind BO) {
6304 if (T->isIntegerTy())
6305 return true;
6307 if (T->isFloatingPointTy() && (BO == BO_Add || BO == BO_Sub))
6308 return llvm::isPowerOf2_64(CGF.CGM.getDataLayout().getTypeStoreSize(T));
6310 return false;
6313 if (!CheckAtomicSupport(Update.getScalarVal()->getType(), BO) ||
6314 !CheckAtomicSupport(X.getAddress().getElementType(), BO))
6315 return std::make_pair(false, RValue::get(nullptr));
6317 bool IsInteger = X.getAddress().getElementType()->isIntegerTy();
6318 llvm::AtomicRMWInst::BinOp RMWOp;
6319 switch (BO) {
6320 case BO_Add:
6321 RMWOp = IsInteger ? llvm::AtomicRMWInst::Add : llvm::AtomicRMWInst::FAdd;
6322 break;
6323 case BO_Sub:
6324 if (!IsXLHSInRHSPart)
6325 return std::make_pair(false, RValue::get(nullptr));
6326 RMWOp = IsInteger ? llvm::AtomicRMWInst::Sub : llvm::AtomicRMWInst::FSub;
6327 break;
6328 case BO_And:
6329 RMWOp = llvm::AtomicRMWInst::And;
6330 break;
6331 case BO_Or:
6332 RMWOp = llvm::AtomicRMWInst::Or;
6333 break;
6334 case BO_Xor:
6335 RMWOp = llvm::AtomicRMWInst::Xor;
6336 break;
6337 case BO_LT:
6338 if (IsInteger)
6339 RMWOp = X.getType()->hasSignedIntegerRepresentation()
6340 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Min
6341 : llvm::AtomicRMWInst::Max)
6342 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMin
6343 : llvm::AtomicRMWInst::UMax);
6344 else
6345 RMWOp = IsXLHSInRHSPart ? llvm::AtomicRMWInst::FMin
6346 : llvm::AtomicRMWInst::FMax;
6347 break;
6348 case BO_GT:
6349 if (IsInteger)
6350 RMWOp = X.getType()->hasSignedIntegerRepresentation()
6351 ? (IsXLHSInRHSPart ? llvm::AtomicRMWInst::Max
6352 : llvm::AtomicRMWInst::Min)
6353 : (IsXLHSInRHSPart ? llvm::AtomicRMWInst::UMax
6354 : llvm::AtomicRMWInst::UMin);
6355 else
6356 RMWOp = IsXLHSInRHSPart ? llvm::AtomicRMWInst::FMax
6357 : llvm::AtomicRMWInst::FMin;
6358 break;
6359 case BO_Assign:
6360 RMWOp = llvm::AtomicRMWInst::Xchg;
6361 break;
6362 case BO_Mul:
6363 case BO_Div:
6364 case BO_Rem:
6365 case BO_Shl:
6366 case BO_Shr:
6367 case BO_LAnd:
6368 case BO_LOr:
6369 return std::make_pair(false, RValue::get(nullptr));
6370 case BO_PtrMemD:
6371 case BO_PtrMemI:
6372 case BO_LE:
6373 case BO_GE:
6374 case BO_EQ:
6375 case BO_NE:
6376 case BO_Cmp:
6377 case BO_AddAssign:
6378 case BO_SubAssign:
6379 case BO_AndAssign:
6380 case BO_OrAssign:
6381 case BO_XorAssign:
6382 case BO_MulAssign:
6383 case BO_DivAssign:
6384 case BO_RemAssign:
6385 case BO_ShlAssign:
6386 case BO_ShrAssign:
6387 case BO_Comma:
6388 llvm_unreachable("Unsupported atomic update operation");
6390 llvm::Value *UpdateVal = Update.getScalarVal();
6391 if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
6392 if (IsInteger)
6393 UpdateVal = CGF.Builder.CreateIntCast(
6394 IC, X.getAddress().getElementType(),
6395 X.getType()->hasSignedIntegerRepresentation());
6396 else
6397 UpdateVal = CGF.Builder.CreateCast(llvm::Instruction::CastOps::UIToFP, IC,
6398 X.getAddress().getElementType());
6400 llvm::AtomicRMWInst *Res =
6401 CGF.emitAtomicRMWInst(RMWOp, X.getAddress(), UpdateVal, AO);
6402 return std::make_pair(true, RValue::get(Res));
6405 std::pair<bool, RValue> CodeGenFunction::EmitOMPAtomicSimpleUpdateExpr(
6406 LValue X, RValue E, BinaryOperatorKind BO, bool IsXLHSInRHSPart,
6407 llvm::AtomicOrdering AO, SourceLocation Loc,
6408 const llvm::function_ref<RValue(RValue)> CommonGen) {
6409 // Update expressions are allowed to have the following forms:
6410 // x binop= expr; -> xrval binop expr;
6411 // x++, ++x -> xrval + 1;
6412 // x--, --x -> xrval - 1;
6413 // x = x binop expr; -> xrval binop expr;
6414 // x = expr Op x; -> expr binop xrval;
6415 auto Res = emitOMPAtomicRMW(*this, X, E, BO, AO, IsXLHSInRHSPart);
6416 if (!Res.first) {
6417 if (X.isGlobalReg()) {
6418 // Emit an update expression: 'xrval' binop 'expr' or 'expr' binop
6419 // 'xrval'.
6420 EmitStoreThroughLValue(CommonGen(EmitLoadOfLValue(X, Loc)), X);
6421 } else {
6422 // Perform compare-and-swap procedure.
6423 EmitAtomicUpdate(X, AO, CommonGen, X.getType().isVolatileQualified());
6426 return Res;
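// Illustrative contrast (assumed user code): 'x += 1' on a plain int maps to
// a single 'atomicrmw add' via emitOMPAtomicRMW, whereas 'x = 2 * x' has no
// matching RMW opcode, so it falls back to the compare-and-swap loop in
// EmitAtomicUpdate.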
6429 static void emitOMPAtomicUpdateExpr(CodeGenFunction &CGF,
6430 llvm::AtomicOrdering AO, const Expr *X,
6431 const Expr *E, const Expr *UE,
6432 bool IsXLHSInRHSPart, SourceLocation Loc) {
6433 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
6434 "Update expr in 'atomic update' must be a binary operator.");
6435 const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
6436 // Update expressions are allowed to have the following forms:
6437 // x binop= expr; -> xrval binop expr;
6438 // x++, ++x -> xrval + 1;
6439 // x--, --x -> xrval - 1;
6440 // x = x binop expr; -> xrval binop expr;
6441 // x = expr Op x; -> expr binop xrval;
6442 assert(X->isLValue() && "X of 'omp atomic update' is not lvalue");
6443 LValue XLValue = CGF.EmitLValue(X);
6444 RValue ExprRValue = CGF.EmitAnyExpr(E);
6445 const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
6446 const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
6447 const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
6448 const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
6449 auto &&Gen = [&CGF, UE, ExprRValue, XRValExpr, ERValExpr](RValue XRValue) {
6450 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
6451 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
6452 return CGF.EmitAnyExpr(UE);
6454 (void)CGF.EmitOMPAtomicSimpleUpdateExpr(
6455 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
6456 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
6457 // OpenMP, 2.17.7, atomic Construct
6458 // If the write, update, or capture clause is specified and the release,
6459 // acq_rel, or seq_cst clause is specified then the strong flush on entry to
6460 // the atomic operation is also a release flush.
6461 switch (AO) {
6462 case llvm::AtomicOrdering::Release:
6463 case llvm::AtomicOrdering::AcquireRelease:
6464 case llvm::AtomicOrdering::SequentiallyConsistent:
6465 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, {}, Loc,
6466 llvm::AtomicOrdering::Release);
6467 break;
6468 case llvm::AtomicOrdering::Acquire:
6469 case llvm::AtomicOrdering::Monotonic:
6470 break;
6471 case llvm::AtomicOrdering::NotAtomic:
6472 case llvm::AtomicOrdering::Unordered:
6473 llvm_unreachable("Unexpected ordering.");
6477 static RValue convertToType(CodeGenFunction &CGF, RValue Value,
6478 QualType SourceType, QualType ResType,
6479 SourceLocation Loc) {
6480 switch (CGF.getEvaluationKind(ResType)) {
6481 case TEK_Scalar:
6482 return RValue::get(
6483 convertToScalarValue(CGF, Value, SourceType, ResType, Loc));
6484 case TEK_Complex: {
6485 auto Res = convertToComplexValue(CGF, Value, SourceType, ResType, Loc);
6486 return RValue::getComplex(Res.first, Res.second);
6488 case TEK_Aggregate:
6489 break;
6491 llvm_unreachable("Must be a scalar or complex.");
6494 static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF,
6495 llvm::AtomicOrdering AO,
6496 bool IsPostfixUpdate, const Expr *V,
6497 const Expr *X, const Expr *E,
6498 const Expr *UE, bool IsXLHSInRHSPart,
6499 SourceLocation Loc) {
6500 assert(X->isLValue() && "X of 'omp atomic capture' is not lvalue");
6501 assert(V->isLValue() && "V of 'omp atomic capture' is not lvalue");
6502 RValue NewVVal;
6503 LValue VLValue = CGF.EmitLValue(V);
6504 LValue XLValue = CGF.EmitLValue(X);
6505 RValue ExprRValue = CGF.EmitAnyExpr(E);
6506 QualType NewVValType;
6507 if (UE) {
6508 // 'x' is updated with some additional value.
6509 assert(isa<BinaryOperator>(UE->IgnoreImpCasts()) &&
6510 "Update expr in 'atomic capture' must be a binary operator.");
6511 const auto *BOUE = cast<BinaryOperator>(UE->IgnoreImpCasts());
6512 // Update expressions are allowed to have the following forms:
6513 // x binop= expr; -> xrval binop expr;
6514 // x++, ++x -> xrval + 1;
6515 // x--, --x -> xrval - 1;
6516 // x = x binop expr; -> xrval binop expr;
6517 // x = expr Op x; -> expr binop xrval;
6518 const auto *LHS = cast<OpaqueValueExpr>(BOUE->getLHS()->IgnoreImpCasts());
6519 const auto *RHS = cast<OpaqueValueExpr>(BOUE->getRHS()->IgnoreImpCasts());
6520 const OpaqueValueExpr *XRValExpr = IsXLHSInRHSPart ? LHS : RHS;
6521 NewVValType = XRValExpr->getType();
6522 const OpaqueValueExpr *ERValExpr = IsXLHSInRHSPart ? RHS : LHS;
6523 auto &&Gen = [&CGF, &NewVVal, UE, ExprRValue, XRValExpr, ERValExpr,
6524 IsPostfixUpdate](RValue XRValue) {
6525 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
6526 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, XRValue);
6527 RValue Res = CGF.EmitAnyExpr(UE);
6528 NewVVal = IsPostfixUpdate ? XRValue : Res;
6529 return Res;
6531 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
6532 XLValue, ExprRValue, BOUE->getOpcode(), IsXLHSInRHSPart, AO, Loc, Gen);
6533 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
6534 if (Res.first) {
6535 // 'atomicrmw' instruction was generated.
6536 if (IsPostfixUpdate) {
6537 // Use old value from 'atomicrmw'.
6538 NewVVal = Res.second;
6539 } else {
6540 // 'atomicrmw' does not provide new value, so evaluate it using old
6541 // value of 'x'.
6542 CodeGenFunction::OpaqueValueMapping MapExpr(CGF, ERValExpr, ExprRValue);
6543 CodeGenFunction::OpaqueValueMapping MapX(CGF, XRValExpr, Res.second);
6544 NewVVal = CGF.EmitAnyExpr(UE);
6547 } else {
6548 // 'x' is simply rewritten with some 'expr'.
6549 NewVValType = X->getType().getNonReferenceType();
6550 ExprRValue = convertToType(CGF, ExprRValue, E->getType(),
6551 X->getType().getNonReferenceType(), Loc);
6552 auto &&Gen = [&NewVVal, ExprRValue](RValue XRValue) {
6553 NewVVal = XRValue;
6554 return ExprRValue;
6556 // Try to perform atomicrmw xchg, otherwise simple exchange.
6557 auto Res = CGF.EmitOMPAtomicSimpleUpdateExpr(
6558 XLValue, ExprRValue, /*BO=*/BO_Assign, /*IsXLHSInRHSPart=*/false, AO,
6559 Loc, Gen);
6560 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, X);
6561 if (Res.first) {
6562 // 'atomicrmw' instruction was generated.
6563 NewVVal = IsPostfixUpdate ? Res.second : ExprRValue;
6566 // Emit post-update store to 'v' of old/new 'x' value.
6567 CGF.emitOMPSimpleStore(VLValue, NewVVal, NewVValType, Loc);
6568 CGF.CGM.getOpenMPRuntime().checkAndEmitLastprivateConditional(CGF, V);
6569 // OpenMP 5.1 removes the required flush for the capture clause.
6570 if (CGF.CGM.getLangOpts().OpenMP < 51) {
6571 // OpenMP, 2.17.7, atomic Construct
6572 // If the write, update, or capture clause is specified and the release,
6573 // acq_rel, or seq_cst clause is specified then the strong flush on entry to
6574 // the atomic operation is also a release flush.
6575 // If the read or capture clause is specified and the acquire, acq_rel, or
6576 // seq_cst clause is specified then the strong flush on exit from the atomic
6577 // operation is also an acquire flush.
6578 switch (AO) {
6579 case llvm::AtomicOrdering::Release:
6580 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, {}, Loc,
6581 llvm::AtomicOrdering::Release);
6582 break;
6583 case llvm::AtomicOrdering::Acquire:
6584 CGF.CGM.getOpenMPRuntime().emitFlush(CGF, {}, Loc,
6585 llvm::AtomicOrdering::Acquire);
6586 break;
6587 case llvm::AtomicOrdering::AcquireRelease:
6588 case llvm::AtomicOrdering::SequentiallyConsistent:
6589 CGF.CGM.getOpenMPRuntime().emitFlush(
6590 CGF, {}, Loc, llvm::AtomicOrdering::AcquireRelease);
6591 break;
6592 case llvm::AtomicOrdering::Monotonic:
6593 break;
6594 case llvm::AtomicOrdering::NotAtomic:
6595 case llvm::AtomicOrdering::Unordered:
6596 llvm_unreachable("Unexpected ordering.");
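// Two illustrative capture forms distinguished by IsPostfixUpdate:
//
//   #pragma omp atomic capture
//   { v = x; x += 1; }   // postfix: 'v' receives the old value of 'x'
//
//   #pragma omp atomic capture
//   { x += 1; v = x; }   // prefix: 'v' receives the updated value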
6601 static void emitOMPAtomicCompareExpr(
6602 CodeGenFunction &CGF, llvm::AtomicOrdering AO, llvm::AtomicOrdering FailAO,
6603 const Expr *X, const Expr *V, const Expr *R, const Expr *E, const Expr *D,
6604 const Expr *CE, bool IsXBinopExpr, bool IsPostfixUpdate, bool IsFailOnly,
6605 SourceLocation Loc) {
6606 llvm::OpenMPIRBuilder &OMPBuilder =
6607 CGF.CGM.getOpenMPRuntime().getOMPBuilder();
6609 OMPAtomicCompareOp Op;
6610 assert(isa<BinaryOperator>(CE) && "CE is not a BinaryOperator");
6611 switch (cast<BinaryOperator>(CE)->getOpcode()) {
6612 case BO_EQ:
6613 Op = OMPAtomicCompareOp::EQ;
6614 break;
6615 case BO_LT:
6616 Op = OMPAtomicCompareOp::MIN;
6617 break;
6618 case BO_GT:
6619 Op = OMPAtomicCompareOp::MAX;
6620 break;
6621 default:
6622 llvm_unreachable("unsupported atomic compare binary operator");
6625 LValue XLVal = CGF.EmitLValue(X);
6626 Address XAddr = XLVal.getAddress();
6628 auto EmitRValueWithCastIfNeeded = [&CGF, Loc](const Expr *X, const Expr *E) {
6629 if (X->getType() == E->getType())
6630 return CGF.EmitScalarExpr(E);
6631 const Expr *NewE = E->IgnoreImplicitAsWritten();
6632 llvm::Value *V = CGF.EmitScalarExpr(NewE);
6633 if (NewE->getType() == X->getType())
6634 return V;
6635 return CGF.EmitScalarConversion(V, NewE->getType(), X->getType(), Loc);
6638 llvm::Value *EVal = EmitRValueWithCastIfNeeded(X, E);
6639 llvm::Value *DVal = D ? EmitRValueWithCastIfNeeded(X, D) : nullptr;
6640 if (auto *CI = dyn_cast<llvm::ConstantInt>(EVal))
6641 EVal = CGF.Builder.CreateIntCast(
6642 CI, XLVal.getAddress().getElementType(),
6643 E->getType()->hasSignedIntegerRepresentation());
6644 if (DVal)
6645 if (auto *CI = dyn_cast<llvm::ConstantInt>(DVal))
6646 DVal = CGF.Builder.CreateIntCast(
6647 CI, XLVal.getAddress().getElementType(),
6648 D->getType()->hasSignedIntegerRepresentation());
6650 llvm::OpenMPIRBuilder::AtomicOpValue XOpVal{
6651 XAddr.emitRawPointer(CGF), XAddr.getElementType(),
6652 X->getType()->hasSignedIntegerRepresentation(),
6653 X->getType().isVolatileQualified()};
6654 llvm::OpenMPIRBuilder::AtomicOpValue VOpVal, ROpVal;
6655 if (V) {
6656 LValue LV = CGF.EmitLValue(V);
6657 Address Addr = LV.getAddress();
6658 VOpVal = {Addr.emitRawPointer(CGF), Addr.getElementType(),
6659 V->getType()->hasSignedIntegerRepresentation(),
6660 V->getType().isVolatileQualified()};
6662 if (R) {
6663 LValue LV = CGF.EmitLValue(R);
6664 Address Addr = LV.getAddress();
6665 ROpVal = {Addr.emitRawPointer(CGF), Addr.getElementType(),
6666 R->getType()->hasSignedIntegerRepresentation(),
6667 R->getType().isVolatileQualified()};
6670 if (FailAO == llvm::AtomicOrdering::NotAtomic) {
6671 // The 'fail' clause was not specified on the
6672 // "#pragma omp atomic compare" construct.
6673 CGF.Builder.restoreIP(OMPBuilder.createAtomicCompare(
6674 CGF.Builder, XOpVal, VOpVal, ROpVal, EVal, DVal, AO, Op, IsXBinopExpr,
6675 IsPostfixUpdate, IsFailOnly));
6676 } else
6677 CGF.Builder.restoreIP(OMPBuilder.createAtomicCompare(
6678 CGF.Builder, XOpVal, VOpVal, ROpVal, EVal, DVal, AO, Op, IsXBinopExpr,
6679 IsPostfixUpdate, IsFailOnly, FailAO));
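// Illustrative compare forms mapped above (assumed user code):
//
//   #pragma omp atomic compare
//   x = (x == e) ? d : x;   // BO_EQ -> conditional exchange
//
//   #pragma omp atomic compare
//   x = (x < e) ? e : x;    // BO_LT -> atomic min/max; IsXBinopExpr picks
//                           // the direction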
6682 static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
6683 llvm::AtomicOrdering AO,
6684 llvm::AtomicOrdering FailAO, bool IsPostfixUpdate,
6685 const Expr *X, const Expr *V, const Expr *R,
6686 const Expr *E, const Expr *UE, const Expr *D,
6687 const Expr *CE, bool IsXLHSInRHSPart,
6688 bool IsFailOnly, SourceLocation Loc) {
6689 switch (Kind) {
6690 case OMPC_read:
6691 emitOMPAtomicReadExpr(CGF, AO, X, V, Loc);
6692 break;
6693 case OMPC_write:
6694 emitOMPAtomicWriteExpr(CGF, AO, X, E, Loc);
6695 break;
6696 case OMPC_unknown:
6697 case OMPC_update:
6698 emitOMPAtomicUpdateExpr(CGF, AO, X, E, UE, IsXLHSInRHSPart, Loc);
6699 break;
6700 case OMPC_capture:
6701 emitOMPAtomicCaptureExpr(CGF, AO, IsPostfixUpdate, V, X, E, UE,
6702 IsXLHSInRHSPart, Loc);
6703 break;
6704 case OMPC_compare: {
6705 emitOMPAtomicCompareExpr(CGF, AO, FailAO, X, V, R, E, D, CE,
6706 IsXLHSInRHSPart, IsPostfixUpdate, IsFailOnly, Loc);
6707 break;
6709 default:
6710 llvm_unreachable("Clause is not allowed in 'omp atomic'.");
6714 void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
6715 llvm::AtomicOrdering AO = CGM.getOpenMPRuntime().getDefaultMemoryOrdering();
6716 // Memory ordering for the 'fail' clause.
6717 llvm::AtomicOrdering FailAO = llvm::AtomicOrdering::NotAtomic;
6718 bool MemOrderingSpecified = false;
6719 if (S.getSingleClause<OMPSeqCstClause>()) {
6720 AO = llvm::AtomicOrdering::SequentiallyConsistent;
6721 MemOrderingSpecified = true;
6722 } else if (S.getSingleClause<OMPAcqRelClause>()) {
6723 AO = llvm::AtomicOrdering::AcquireRelease;
6724 MemOrderingSpecified = true;
6725 } else if (S.getSingleClause<OMPAcquireClause>()) {
6726 AO = llvm::AtomicOrdering::Acquire;
6727 MemOrderingSpecified = true;
6728 } else if (S.getSingleClause<OMPReleaseClause>()) {
6729 AO = llvm::AtomicOrdering::Release;
6730 MemOrderingSpecified = true;
6731 } else if (S.getSingleClause<OMPRelaxedClause>()) {
6732 AO = llvm::AtomicOrdering::Monotonic;
6733 MemOrderingSpecified = true;
6735 llvm::SmallSet<OpenMPClauseKind, 2> KindsEncountered;
6736 OpenMPClauseKind Kind = OMPC_unknown;
6737 for (const OMPClause *C : S.clauses()) {
6738 // Find the first clause (skip seq_cst|acq_rel|acquire|release|relaxed clause,
6739 // if it is first).
6740 OpenMPClauseKind K = C->getClauseKind();
6741 // TODO: the 'weak' clause is not supported yet; emit nothing for now.
6742 if (K == OMPC_weak)
6743 return;
6744 if (K == OMPC_seq_cst || K == OMPC_acq_rel || K == OMPC_acquire ||
6745 K == OMPC_release || K == OMPC_relaxed || K == OMPC_hint)
6746 continue;
6747 Kind = K;
6748 KindsEncountered.insert(K);
6750 // We just need to correct Kind here. There is no need for a bool saying it is
6751 // actually a compare-capture, because we can tell from whether V and R are
6752 // nullptr.
6753 if (KindsEncountered.contains(OMPC_compare) &&
6754 KindsEncountered.contains(OMPC_capture))
6755 Kind = OMPC_compare;
6756 if (!MemOrderingSpecified) {
6757 llvm::AtomicOrdering DefaultOrder =
6758 CGM.getOpenMPRuntime().getDefaultMemoryOrdering();
6759 if (DefaultOrder == llvm::AtomicOrdering::Monotonic ||
6760 DefaultOrder == llvm::AtomicOrdering::SequentiallyConsistent ||
6761 (DefaultOrder == llvm::AtomicOrdering::AcquireRelease &&
6762 Kind == OMPC_capture)) {
6763 AO = DefaultOrder;
6764 } else if (DefaultOrder == llvm::AtomicOrdering::AcquireRelease) {
6765 if (Kind == OMPC_unknown || Kind == OMPC_update || Kind == OMPC_write) {
6766 AO = llvm::AtomicOrdering::Release;
6767 } else if (Kind == OMPC_read) {
6769 AO = llvm::AtomicOrdering::Acquire;
6774 if (KindsEncountered.contains(OMPC_compare) &&
6775 KindsEncountered.contains(OMPC_fail)) {
6776 Kind = OMPC_compare;
6777 const auto *FailClause = S.getSingleClause<OMPFailClause>();
6778 if (FailClause) {
6779 OpenMPClauseKind FailParameter = FailClause->getFailParameter();
6780 if (FailParameter == llvm::omp::OMPC_relaxed)
6781 FailAO = llvm::AtomicOrdering::Monotonic;
6782 else if (FailParameter == llvm::omp::OMPC_acquire)
6783 FailAO = llvm::AtomicOrdering::Acquire;
6784 else if (FailParameter == llvm::omp::OMPC_seq_cst)
6785 FailAO = llvm::AtomicOrdering::SequentiallyConsistent;
6789 LexicalScope Scope(*this, S.getSourceRange());
6790 EmitStopPoint(S.getAssociatedStmt());
6791 emitOMPAtomicExpr(*this, Kind, AO, FailAO, S.isPostfixUpdate(), S.getX(),
6792 S.getV(), S.getR(), S.getExpr(), S.getUpdateExpr(),
6793 S.getD(), S.getCondExpr(), S.isXLHSInRHSPart(),
6794 S.isFailOnly(), S.getBeginLoc());
6797 static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
6798 const OMPExecutableDirective &S,
6799 const RegionCodeGenTy &CodeGen) {
6800 assert(isOpenMPTargetExecutionDirective(S.getDirectiveKind()));
6801 CodeGenModule &CGM = CGF.CGM;
6803 // On the device, emit this construct as inlined code.
6804 if (CGM.getLangOpts().OpenMPIsTargetDevice) {
6805 OMPLexicalScope Scope(CGF, S, OMPD_target);
6806 CGM.getOpenMPRuntime().emitInlinedDirective(
6807 CGF, OMPD_target, [&S](CodeGenFunction &CGF, PrePostActionTy &) {
6808 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
6810 return;
6813 auto LPCRegion = CGOpenMPRuntime::LastprivateConditionalRAII::disable(CGF, S);
6814 llvm::Function *Fn = nullptr;
6815 llvm::Constant *FnID = nullptr;
6817 const Expr *IfCond = nullptr;
6818 // Check for the (at most one) 'if' clause associated with the target region.
6819 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
6820 if (C->getNameModifier() == OMPD_unknown ||
6821 C->getNameModifier() == OMPD_target) {
6822 IfCond = C->getCondition();
6823 break;
6827 // Check if we have any device clause associated with the directive.
6828 llvm::PointerIntPair<const Expr *, 2, OpenMPDeviceClauseModifier> Device(
6829 nullptr, OMPC_DEVICE_unknown);
6830 if (auto *C = S.getSingleClause<OMPDeviceClause>())
6831 Device.setPointerAndInt(C->getDevice(), C->getModifier());
6833 // Check if we have an if clause whose conditional always evaluates to false
6834 // or if we do not have any targets specified. If so, the target region is not
6835 // an offload entry point.
6836 bool IsOffloadEntry = true;
6837 if (IfCond) {
6838 bool Val;
6839 if (CGF.ConstantFoldsToSimpleInteger(IfCond, Val) && !Val)
6840 IsOffloadEntry = false;
6842 if (CGM.getLangOpts().OMPTargetTriples.empty())
6843 IsOffloadEntry = false;
6845 if (CGM.getLangOpts().OpenMPOffloadMandatory && !IsOffloadEntry) {
6846 unsigned DiagID = CGM.getDiags().getCustomDiagID(
6847 DiagnosticsEngine::Error,
6848 "No offloading entry generated while offloading is mandatory.");
6849 CGM.getDiags().Report(DiagID);
6852 assert(CGF.CurFuncDecl && "No parent declaration for target region!");
6853 StringRef ParentName;
6854 // In case we have Ctors/Dtors, we use the complete-type variant to produce
6855 // the mangling of the device outlined kernel.
6856 if (const auto *D = dyn_cast<CXXConstructorDecl>(CGF.CurFuncDecl))
6857 ParentName = CGM.getMangledName(GlobalDecl(D, Ctor_Complete));
6858 else if (const auto *D = dyn_cast<CXXDestructorDecl>(CGF.CurFuncDecl))
6859 ParentName = CGM.getMangledName(GlobalDecl(D, Dtor_Complete));
6860 else
6861 ParentName =
6862 CGM.getMangledName(GlobalDecl(cast<FunctionDecl>(CGF.CurFuncDecl)));
6864 // Emit target region as a standalone region.
6865 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(S, ParentName, Fn, FnID,
6866 IsOffloadEntry, CodeGen);
6867 OMPLexicalScope Scope(CGF, S, OMPD_task);
6868 auto &&SizeEmitter =
6869 [IsOffloadEntry](CodeGenFunction &CGF,
6870 const OMPLoopDirective &D) -> llvm::Value * {
6871 if (IsOffloadEntry) {
6872 OMPLoopScope(CGF, D);
6873 // Emit calculation of the iterations count.
6874 llvm::Value *NumIterations = CGF.EmitScalarExpr(D.getNumIterations());
6875 NumIterations = CGF.Builder.CreateIntCast(NumIterations, CGF.Int64Ty,
6876 /*isSigned=*/false);
6877 return NumIterations;
6879 return nullptr;
6881 CGM.getOpenMPRuntime().emitTargetCall(CGF, S, Fn, FnID, IfCond, Device,
6882 SizeEmitter);
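// An illustrative directive exercising the clauses handled above:
//
//   #pragma omp target if(target : n > 1024) device(1)
//   { /* ... */ }
//
// If the 'if' condition folds to false at compile time, or no offload targets
// are configured, the region is not registered as an offload entry point.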
6885 static void emitTargetRegion(CodeGenFunction &CGF, const OMPTargetDirective &S,
6886 PrePostActionTy &Action) {
6887 Action.Enter(CGF);
6888 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
6889 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
6890 CGF.EmitOMPPrivateClause(S, PrivateScope);
6891 (void)PrivateScope.Privatize();
6892 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
6893 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
6895 CGF.EmitStmt(S.getCapturedStmt(OMPD_target)->getCapturedStmt());
6896 CGF.EnsureInsertPoint();
6899 void CodeGenFunction::EmitOMPTargetDeviceFunction(CodeGenModule &CGM,
6900 StringRef ParentName,
6901 const OMPTargetDirective &S) {
6902 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6903 emitTargetRegion(CGF, S, Action);
6905 llvm::Function *Fn;
6906 llvm::Constant *Addr;
6907 // Emit target region as a standalone region.
6908 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
6909 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
6910 assert(Fn && Addr && "Target device function emission failed.");
6913 void CodeGenFunction::EmitOMPTargetDirective(const OMPTargetDirective &S) {
6914 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6915 emitTargetRegion(CGF, S, Action);
6917 emitCommonOMPTargetDirective(*this, S, CodeGen);
6920 static void emitCommonOMPTeamsDirective(CodeGenFunction &CGF,
6921 const OMPExecutableDirective &S,
6922 OpenMPDirectiveKind InnermostKind,
6923 const RegionCodeGenTy &CodeGen) {
6924 const CapturedStmt *CS = S.getCapturedStmt(OMPD_teams);
6925 llvm::Function *OutlinedFn =
6926 CGF.CGM.getOpenMPRuntime().emitTeamsOutlinedFunction(
6927 CGF, S, *CS->getCapturedDecl()->param_begin(), InnermostKind,
6928 CodeGen);
6930 const auto *NT = S.getSingleClause<OMPNumTeamsClause>();
6931 const auto *TL = S.getSingleClause<OMPThreadLimitClause>();
6932 if (NT || TL) {
6933 const Expr *NumTeams = NT ? NT->getNumTeams().front() : nullptr;
6934 const Expr *ThreadLimit = TL ? TL->getThreadLimit().front() : nullptr;
6936 CGF.CGM.getOpenMPRuntime().emitNumTeamsClause(CGF, NumTeams, ThreadLimit,
6937 S.getBeginLoc());
6940 OMPTeamsScope Scope(CGF, S);
6941 llvm::SmallVector<llvm::Value *, 16> CapturedVars;
6942 CGF.GenerateOpenMPCapturedVars(*CS, CapturedVars);
6943 CGF.CGM.getOpenMPRuntime().emitTeamsCall(CGF, S, S.getBeginLoc(), OutlinedFn,
6944 CapturedVars);
6947 void CodeGenFunction::EmitOMPTeamsDirective(const OMPTeamsDirective &S) {
6948 // Emit teams region as a standalone region.
6949 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6950 Action.Enter(CGF);
6951 OMPPrivateScope PrivateScope(CGF);
6952 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
6953 CGF.EmitOMPPrivateClause(S, PrivateScope);
6954 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
6955 (void)PrivateScope.Privatize();
6956 CGF.EmitStmt(S.getCapturedStmt(OMPD_teams)->getCapturedStmt());
6957 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
6959 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen);
6960 emitPostUpdateForReductionClause(*this, S,
6961 [](CodeGenFunction &) { return nullptr; });
6964 static void emitTargetTeamsRegion(CodeGenFunction &CGF, PrePostActionTy &Action,
6965 const OMPTargetTeamsDirective &S) {
6966 auto *CS = S.getCapturedStmt(OMPD_teams);
6967 Action.Enter(CGF);
6968 // Emit teams region as a standalone region.
6969 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) {
6970 Action.Enter(CGF);
6971 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
6972 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
6973 CGF.EmitOMPPrivateClause(S, PrivateScope);
6974 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
6975 (void)PrivateScope.Privatize();
6976 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
6977 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
6978 CGF.EmitStmt(CS->getCapturedStmt());
6979 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
6981 emitCommonOMPTeamsDirective(CGF, S, OMPD_teams, CodeGen);
6982 emitPostUpdateForReductionClause(CGF, S,
6983 [](CodeGenFunction &) { return nullptr; });
6986 void CodeGenFunction::EmitOMPTargetTeamsDeviceFunction(
6987 CodeGenModule &CGM, StringRef ParentName,
6988 const OMPTargetTeamsDirective &S) {
6989 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
6990 emitTargetTeamsRegion(CGF, Action, S);
6992 llvm::Function *Fn;
6993 llvm::Constant *Addr;
6994 // Emit target region as a standalone region.
6995 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
6996 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
6997 assert(Fn && Addr && "Target device function emission failed.");
7000 void CodeGenFunction::EmitOMPTargetTeamsDirective(
7001 const OMPTargetTeamsDirective &S) {
7002 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7003 emitTargetTeamsRegion(CGF, Action, S);
7005 emitCommonOMPTargetDirective(*this, S, CodeGen);
7008 static void
7009 emitTargetTeamsDistributeRegion(CodeGenFunction &CGF, PrePostActionTy &Action,
7010 const OMPTargetTeamsDistributeDirective &S) {
7011 Action.Enter(CGF);
7012 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
7013 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
7016 // Emit teams region as a standalone region.
7017 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
7018 PrePostActionTy &Action) {
7019 Action.Enter(CGF);
7020 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
7021 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
7022 (void)PrivateScope.Privatize();
7023 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
7024 CodeGenDistribute);
7025 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
7027 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute, CodeGen);
7028 emitPostUpdateForReductionClause(CGF, S,
7029 [](CodeGenFunction &) { return nullptr; });
7032 void CodeGenFunction::EmitOMPTargetTeamsDistributeDeviceFunction(
7033 CodeGenModule &CGM, StringRef ParentName,
7034 const OMPTargetTeamsDistributeDirective &S) {
7035 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7036 emitTargetTeamsDistributeRegion(CGF, Action, S);
7038 llvm::Function *Fn;
7039 llvm::Constant *Addr;
7040 // Emit target region as a standalone region.
7041 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
7042 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
7043 assert(Fn && Addr && "Target device function emission failed.");
7046 void CodeGenFunction::EmitOMPTargetTeamsDistributeDirective(
7047 const OMPTargetTeamsDistributeDirective &S) {
7048 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7049 emitTargetTeamsDistributeRegion(CGF, Action, S);
7051 emitCommonOMPTargetDirective(*this, S, CodeGen);
7054 static void emitTargetTeamsDistributeSimdRegion(
7055 CodeGenFunction &CGF, PrePostActionTy &Action,
7056 const OMPTargetTeamsDistributeSimdDirective &S) {
7057 Action.Enter(CGF);
7058 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
7059 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
7062 // Emit teams region as a standalone region.
7063 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
7064 PrePostActionTy &Action) {
7065 Action.Enter(CGF);
7066 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
7067 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
7068 (void)PrivateScope.Privatize();
7069 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
7070 CodeGenDistribute);
7071 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
7073 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_simd, CodeGen);
7074 emitPostUpdateForReductionClause(CGF, S,
7075 [](CodeGenFunction &) { return nullptr; });
7078 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDeviceFunction(
7079 CodeGenModule &CGM, StringRef ParentName,
7080 const OMPTargetTeamsDistributeSimdDirective &S) {
7081 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7082 emitTargetTeamsDistributeSimdRegion(CGF, Action, S);
7084 llvm::Function *Fn;
7085 llvm::Constant *Addr;
7086 // Emit target region as a standalone region.
7087 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
7088 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
7089 assert(Fn && Addr && "Target device function emission failed.");
7092 void CodeGenFunction::EmitOMPTargetTeamsDistributeSimdDirective(
7093 const OMPTargetTeamsDistributeSimdDirective &S) {
7094 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7095 emitTargetTeamsDistributeSimdRegion(CGF, Action, S);
7097 emitCommonOMPTargetDirective(*this, S, CodeGen);
7100 void CodeGenFunction::EmitOMPTeamsDistributeDirective(
7101 const OMPTeamsDistributeDirective &S) {
7103 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
7104 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
7107 // Emit teams region as a standalone region.
7108 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
7109 PrePostActionTy &Action) {
7110 Action.Enter(CGF);
7111 OMPPrivateScope PrivateScope(CGF);
7112 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
7113 (void)PrivateScope.Privatize();
7114 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
7115 CodeGenDistribute);
7116 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
7118 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen);
7119 emitPostUpdateForReductionClause(*this, S,
7120 [](CodeGenFunction &) { return nullptr; });
7123 void CodeGenFunction::EmitOMPTeamsDistributeSimdDirective(
7124 const OMPTeamsDistributeSimdDirective &S) {
7125 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
7126 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
7129 // Emit teams region as a standalone region.
7130 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
7131 PrePostActionTy &Action) {
7132 Action.Enter(CGF);
7133 OMPPrivateScope PrivateScope(CGF);
7134 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
7135 (void)PrivateScope.Privatize();
7136 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_simd,
7137 CodeGenDistribute);
7138 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
7140 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_simd, CodeGen);
7141 emitPostUpdateForReductionClause(*this, S,
7142 [](CodeGenFunction &) { return nullptr; });
7145 void CodeGenFunction::EmitOMPTeamsDistributeParallelForDirective(
7146 const OMPTeamsDistributeParallelForDirective &S) {
7147 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
7148 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
7149 S.getDistInc());
7152 // Emit teams region as a standalone region.
7153 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
7154 PrePostActionTy &Action) {
7155 Action.Enter(CGF);
7156 OMPPrivateScope PrivateScope(CGF);
7157 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
7158 (void)PrivateScope.Privatize();
7159 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
7160 CodeGenDistribute);
7161 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
7163 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for, CodeGen);
7164 emitPostUpdateForReductionClause(*this, S,
7165 [](CodeGenFunction &) { return nullptr; });
7168 void CodeGenFunction::EmitOMPTeamsDistributeParallelForSimdDirective(
7169 const OMPTeamsDistributeParallelForSimdDirective &S) {
7170 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
7171 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
7172 S.getDistInc());
7175 // Emit teams region as a standalone region.
7176 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
7177 PrePostActionTy &Action) {
7178 Action.Enter(CGF);
7179 OMPPrivateScope PrivateScope(CGF);
7180 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
7181 (void)PrivateScope.Privatize();
7182 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
7183 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
7184 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
7186 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute_parallel_for_simd,
7187 CodeGen);
7188 emitPostUpdateForReductionClause(*this, S,
7189 [](CodeGenFunction &) { return nullptr; });
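// Illustrative 'interop' usage handled below (example only):
//   omp_interop_t obj = omp_interop_none;
//   #pragma omp interop init(targetsync : obj) device(0) depend(inout : x) nowait
//   ...
//   #pragma omp interop destroy(obj)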
7192 void CodeGenFunction::EmitOMPInteropDirective(const OMPInteropDirective &S) {
7193 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
7194 llvm::Value *Device = nullptr;
7195 llvm::Value *NumDependences = nullptr;
7196 llvm::Value *DependenceList = nullptr;
7198 if (const auto *C = S.getSingleClause<OMPDeviceClause>())
7199 Device = EmitScalarExpr(C->getDevice());
7201 // Build the dependence list and emit the dependences.
7202 OMPTaskDataTy Data;
7203 buildDependences(S, Data);
7204 if (!Data.Dependences.empty()) {
7205 Address DependenciesArray = Address::invalid();
7206 std::tie(NumDependences, DependenciesArray) =
7207 CGM.getOpenMPRuntime().emitDependClause(*this, Data.Dependences,
7208 S.getBeginLoc());
7209 DependenceList = DependenciesArray.emitRawPointer(*this);
7211 Data.HasNowaitClause = S.hasClausesOfKind<OMPNowaitClause>();
7213 assert(!(Data.HasNowaitClause && !(S.getSingleClause<OMPInitClause>() ||
7214 S.getSingleClause<OMPDestroyClause>() ||
7215 S.getSingleClause<OMPUseClause>())) &&
7216 "OMPNowaitClause clause is used separately in OMPInteropDirective.");
7218 auto ItOMPInitClause = S.getClausesOfKind<OMPInitClause>();
7219 if (!ItOMPInitClause.empty()) {
7220 // Look at the multiple init clauses
7221 for (const OMPInitClause *C : ItOMPInitClause) {
7222 llvm::Value *InteropvarPtr =
7223 EmitLValue(C->getInteropVar()).getPointer(*this);
7224 llvm::omp::OMPInteropType InteropType =
7225 llvm::omp::OMPInteropType::Unknown;
7226 if (C->getIsTarget()) {
7227 InteropType = llvm::omp::OMPInteropType::Target;
7228 } else {
7229 assert(C->getIsTargetSync() &&
7230 "Expected interop-type target/targetsync");
7231 InteropType = llvm::omp::OMPInteropType::TargetSync;
7233 OMPBuilder.createOMPInteropInit(Builder, InteropvarPtr, InteropType,
7234 Device, NumDependences, DependenceList,
7235 Data.HasNowaitClause);
7238 auto ItOMPDestroyClause = S.getClausesOfKind<OMPDestroyClause>();
7239 if (!ItOMPDestroyClause.empty()) {
7240 // Look at the multiple destroy clauses
7241 for (const OMPDestroyClause *C : ItOMPDestroyClause) {
7242 llvm::Value *InteropvarPtr =
7243 EmitLValue(C->getInteropVar()).getPointer(*this);
7244 OMPBuilder.createOMPInteropDestroy(Builder, InteropvarPtr, Device,
7245 NumDependences, DependenceList,
7246 Data.HasNowaitClause);
7249 auto ItOMPUseClause = S.getClausesOfKind<OMPUseClause>();
7250 if (!ItOMPUseClause.empty()) {
7251 // Look at the multiple use clauses
7252 for (const OMPUseClause *C : ItOMPUseClause) {
7253 llvm::Value *InteropvarPtr =
7254 EmitLValue(C->getInteropVar()).getPointer(*this);
7255 OMPBuilder.createOMPInteropUse(Builder, InteropvarPtr, Device,
7256 NumDependences, DependenceList,
7257 Data.HasNowaitClause);
7262 static void emitTargetTeamsDistributeParallelForRegion(
7263 CodeGenFunction &CGF, const OMPTargetTeamsDistributeParallelForDirective &S,
7264 PrePostActionTy &Action) {
7265 Action.Enter(CGF);
7266 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
7267 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
7268 S.getDistInc());
7271 // Emit teams region as a standalone region.
7272 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
7273 PrePostActionTy &Action) {
7274 Action.Enter(CGF);
7275 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
7276 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
7277 (void)PrivateScope.Privatize();
7278 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
7279 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
7280 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
7283 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for,
7284 CodeGenTeams);
7285 emitPostUpdateForReductionClause(CGF, S,
7286 [](CodeGenFunction &) { return nullptr; });
7289 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDeviceFunction(
7290 CodeGenModule &CGM, StringRef ParentName,
7291 const OMPTargetTeamsDistributeParallelForDirective &S) {
7292 // Emit SPMD target teams distribute parallel for region as a standalone
7293 // region.
7294 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7295 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action);
7297 llvm::Function *Fn;
7298 llvm::Constant *Addr;
7299 // Emit target region as a standalone region.
7300 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
7301 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
7302 assert(Fn && Addr && "Target device function emission failed.");
7305 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForDirective(
7306 const OMPTargetTeamsDistributeParallelForDirective &S) {
7307 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7308 emitTargetTeamsDistributeParallelForRegion(CGF, S, Action);
7310 emitCommonOMPTargetDirective(*this, S, CodeGen);
7313 static void emitTargetTeamsDistributeParallelForSimdRegion(
7314 CodeGenFunction &CGF,
7315 const OMPTargetTeamsDistributeParallelForSimdDirective &S,
7316 PrePostActionTy &Action) {
7317 Action.Enter(CGF);
7318 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
7319 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
7320 S.getDistInc());
7323 // Emit teams region as a standalone region.
7324 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
7325 PrePostActionTy &Action) {
7326 Action.Enter(CGF);
7327 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
7328 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
7329 (void)PrivateScope.Privatize();
7330 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
7331 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
7332 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
7335 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for_simd,
7336 CodeGenTeams);
7337 emitPostUpdateForReductionClause(CGF, S,
7338 [](CodeGenFunction &) { return nullptr; });
7341 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDeviceFunction(
7342 CodeGenModule &CGM, StringRef ParentName,
7343 const OMPTargetTeamsDistributeParallelForSimdDirective &S) {
7344 // Emit SPMD target teams distribute parallel for simd region as a standalone
7345 // region.
7346 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7347 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action);
7349 llvm::Function *Fn;
7350 llvm::Constant *Addr;
7351 // Emit target region as a standalone region.
7352 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
7353 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
7354 assert(Fn && Addr && "Target device function emission failed.");
7357 void CodeGenFunction::EmitOMPTargetTeamsDistributeParallelForSimdDirective(
7358 const OMPTargetTeamsDistributeParallelForSimdDirective &S) {
7359 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7360 emitTargetTeamsDistributeParallelForSimdRegion(CGF, S, Action);
7362 emitCommonOMPTargetDirective(*this, S, CodeGen);
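// Illustrative cancellation example handled by the next two functions
// (example only):
//   #pragma omp parallel
//   {
//     #pragma omp cancel parallel if(err)
//     #pragma omp cancellation point parallel
//   }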
7365 void CodeGenFunction::EmitOMPCancellationPointDirective(
7366 const OMPCancellationPointDirective &S) {
7367 CGM.getOpenMPRuntime().emitCancellationPointCall(*this, S.getBeginLoc(),
7368 S.getCancelRegion());
7371 void CodeGenFunction::EmitOMPCancelDirective(const OMPCancelDirective &S) {
7372 const Expr *IfCond = nullptr;
7373 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
7374 if (C->getNameModifier() == OMPD_unknown ||
7375 C->getNameModifier() == OMPD_cancel) {
7376 IfCond = C->getCondition();
7377 break;
7380 if (CGM.getLangOpts().OpenMPIRBuilder) {
7381 llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
7382 // TODO: This check is necessary as we only generate `omp parallel` through
7383 // the OpenMPIRBuilder for now.
7384 if (S.getCancelRegion() == OMPD_parallel ||
7385 S.getCancelRegion() == OMPD_sections ||
7386 S.getCancelRegion() == OMPD_section) {
7387 llvm::Value *IfCondition = nullptr;
7388 if (IfCond)
7389 IfCondition = EmitScalarExpr(IfCond,
7390 /*IgnoreResultAssign=*/true);
7391 llvm::OpenMPIRBuilder::InsertPointOrErrorTy AfterIP =
7392 OMPBuilder.createCancel(Builder, IfCondition, S.getCancelRegion());
7393 assert(AfterIP && "unexpected error creating cancel");
7394 return Builder.restoreIP(*AfterIP);
7398 CGM.getOpenMPRuntime().emitCancelCall(*this, S.getBeginLoc(), IfCond,
7399 S.getCancelRegion());
7402 CodeGenFunction::JumpDest
7403 CodeGenFunction::getOMPCancelDestination(OpenMPDirectiveKind Kind) {
7404 if (Kind == OMPD_parallel || Kind == OMPD_task ||
7405 Kind == OMPD_target_parallel || Kind == OMPD_taskloop ||
7406 Kind == OMPD_master_taskloop || Kind == OMPD_parallel_master_taskloop)
7407 return ReturnBlock;
7408 assert(Kind == OMPD_for || Kind == OMPD_section || Kind == OMPD_sections ||
7409 Kind == OMPD_parallel_sections || Kind == OMPD_parallel_for ||
7410 Kind == OMPD_distribute_parallel_for ||
7411 Kind == OMPD_target_parallel_for ||
7412 Kind == OMPD_teams_distribute_parallel_for ||
7413 Kind == OMPD_target_teams_distribute_parallel_for);
7414 return OMPCancelStack.getExitBlock();
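/// Privatize the list items of a 'use_device_ptr' clause by remapping each
/// original pointer to the device address recorded in \p CaptureDeviceAddrMap,
/// e.g. (illustrative):
///   #pragma omp target data map(tofrom : p[0:n]) use_device_ptr(p)
///   { /* 'p' evaluates to the corresponding device pointer here */ }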
7417 void CodeGenFunction::EmitOMPUseDevicePtrClause(
7418 const OMPUseDevicePtrClause &C, OMPPrivateScope &PrivateScope,
7419 const llvm::DenseMap<const ValueDecl *, llvm::Value *>
7420 CaptureDeviceAddrMap) {
7421 llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
7422 for (const Expr *OrigVarIt : C.varlist()) {
7423 const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(OrigVarIt)->getDecl());
7424 if (!Processed.insert(OrigVD).second)
7425 continue;
7427 // In order to identify the right initializer we need to match the
7428 // declaration used by the mapping logic. In some cases we may get an
7429 // OMPCapturedExprDecl that refers to the original declaration.
7430 const ValueDecl *MatchingVD = OrigVD;
7431 if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
7432 // OMPCapturedExprDecls are used to privatize fields of the current
7433 // structure.
7434 const auto *ME = cast<MemberExpr>(OED->getInit());
7435 assert(isa<CXXThisExpr>(ME->getBase()->IgnoreImpCasts()) &&
7436 "Base should be the current struct!");
7437 MatchingVD = ME->getMemberDecl();
7440 // If we don't have information about the current list item, move on to
7441 // the next one.
7442 auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
7443 if (InitAddrIt == CaptureDeviceAddrMap.end())
7444 continue;
7446 llvm::Type *Ty = ConvertTypeForMem(OrigVD->getType().getNonReferenceType());
7448 // Register the device address as the private copy of the original variable.
7449 bool IsRegistered = PrivateScope.addPrivate(
7450 OrigVD,
7451 Address(InitAddrIt->second, Ty,
7452 getContext().getTypeAlignInChars(getContext().VoidPtrTy)));
7453 assert(IsRegistered && "use_device_ptr var already registered as private");
7454 // Silence the warning about unused variable.
7455 (void)IsRegistered;
7459 static const VarDecl *getBaseDecl(const Expr *Ref) {
7460 const Expr *Base = Ref->IgnoreParenImpCasts();
7461 while (const auto *OASE = dyn_cast<ArraySectionExpr>(Base))
7462 Base = OASE->getBase()->IgnoreParenImpCasts();
7463 while (const auto *ASE = dyn_cast<ArraySubscriptExpr>(Base))
7464 Base = ASE->getBase()->IgnoreParenImpCasts();
7465 return cast<VarDecl>(cast<DeclRefExpr>(Base)->getDecl());
7468 void CodeGenFunction::EmitOMPUseDeviceAddrClause(
7469 const OMPUseDeviceAddrClause &C, OMPPrivateScope &PrivateScope,
7470 const llvm::DenseMap<const ValueDecl *, llvm::Value *>
7471 CaptureDeviceAddrMap) {
7472 llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>, 4> Processed;
7473 for (const Expr *Ref : C.varlist()) {
7474 const VarDecl *OrigVD = getBaseDecl(Ref);
7475 if (!Processed.insert(OrigVD).second)
7476 continue;
7477 // In order to identify the right initializer we need to match the
7478 // declaration used by the mapping logic. In some cases we may get an
7479 // OMPCapturedExprDecl that refers to the original declaration.
7480 const ValueDecl *MatchingVD = OrigVD;
7481 if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(MatchingVD)) {
7482 // OMPCapturedExprDecls are used to privatize fields of the current
7483 // structure.
7484 const auto *ME = cast<MemberExpr>(OED->getInit());
7485 assert(isa<CXXThisExpr>(ME->getBase()) &&
7486 "Base should be the current struct!");
7487 MatchingVD = ME->getMemberDecl();
7490 // If we don't have information about the current list item, move on to
7491 // the next one.
7492 auto InitAddrIt = CaptureDeviceAddrMap.find(MatchingVD);
7493 if (InitAddrIt == CaptureDeviceAddrMap.end())
7494 continue;
7496 llvm::Type *Ty = ConvertTypeForMem(OrigVD->getType().getNonReferenceType());
7498 Address PrivAddr =
7499 Address(InitAddrIt->second, Ty,
7500 getContext().getTypeAlignInChars(getContext().VoidPtrTy));
7501 // For declrefs and variable-length arrays we need to load the pointer for
7502 // correct mapping, since the pointer to the data was passed to the runtime.
7503 if (isa<DeclRefExpr>(Ref->IgnoreParenImpCasts()) ||
7504 MatchingVD->getType()->isArrayType()) {
7505 QualType PtrTy = getContext().getPointerType(
7506 OrigVD->getType().getNonReferenceType());
7507 PrivAddr =
7508 EmitLoadOfPointer(PrivAddr.withElementType(ConvertTypeForMem(PtrTy)),
7509 PtrTy->castAs<PointerType>());
7512 (void)PrivateScope.addPrivate(OrigVD, PrivAddr);
7516 // Generate the instructions for '#pragma omp target data' directive.
7517 void CodeGenFunction::EmitOMPTargetDataDirective(
7518 const OMPTargetDataDirective &S) {
7519 CGOpenMPRuntime::TargetDataInfo Info(/*RequiresDevicePointerInfo=*/true,
7520 /*SeparateBeginEndCalls=*/true);
7522 // Create a pre/post action to signal the privatization of the device pointer.
7523 // This action can be replaced by the OpenMP runtime code generation to
7524 // deactivate privatization.
7525 bool PrivatizeDevicePointers = false;
7526 class DevicePointerPrivActionTy : public PrePostActionTy {
7527 bool &PrivatizeDevicePointers;
7529 public:
7530 explicit DevicePointerPrivActionTy(bool &PrivatizeDevicePointers)
7531 : PrivatizeDevicePointers(PrivatizeDevicePointers) {}
7532 void Enter(CodeGenFunction &CGF) override {
7533 PrivatizeDevicePointers = true;
7536 DevicePointerPrivActionTy PrivAction(PrivatizeDevicePointers);
7538 auto &&CodeGen = [&](CodeGenFunction &CGF, PrePostActionTy &Action) {
7539 auto &&InnermostCodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
7540 CGF.EmitStmt(S.getInnermostCapturedStmt()->getCapturedStmt());
7543 // Codegen that selects whether to generate the privatization code or not.
7544 auto &&PrivCodeGen = [&](CodeGenFunction &CGF, PrePostActionTy &Action) {
7545 RegionCodeGenTy RCG(InnermostCodeGen);
7546 PrivatizeDevicePointers = false;
7548 // Call the pre-action to change the status of PrivatizeDevicePointers if
7549 // needed.
7550 Action.Enter(CGF);
7552 if (PrivatizeDevicePointers) {
7553 OMPPrivateScope PrivateScope(CGF);
7554 // Emit all instances of the use_device_ptr clause.
7555 for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
7556 CGF.EmitOMPUseDevicePtrClause(*C, PrivateScope,
7557 Info.CaptureDeviceAddrMap);
7558 for (const auto *C : S.getClausesOfKind<OMPUseDeviceAddrClause>())
7559 CGF.EmitOMPUseDeviceAddrClause(*C, PrivateScope,
7560 Info.CaptureDeviceAddrMap);
7561 (void)PrivateScope.Privatize();
7562 RCG(CGF);
7563 } else {
7564 // If we don't have target devices, don't bother emitting the data
7565 // mapping code.
7566 std::optional<OpenMPDirectiveKind> CaptureRegion;
7567 if (CGM.getLangOpts().OMPTargetTriples.empty()) {
7568 // Emit helper decls of the use_device_ptr/use_device_addr clauses.
7569 for (const auto *C : S.getClausesOfKind<OMPUseDevicePtrClause>())
7570 for (const Expr *E : C->varlist()) {
7571 const Decl *D = cast<DeclRefExpr>(E)->getDecl();
7572 if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
7573 CGF.EmitVarDecl(*OED);
7575 for (const auto *C : S.getClausesOfKind<OMPUseDeviceAddrClause>())
7576 for (const Expr *E : C->varlist()) {
7577 const Decl *D = getBaseDecl(E);
7578 if (const auto *OED = dyn_cast<OMPCapturedExprDecl>(D))
7579 CGF.EmitVarDecl(*OED);
7581 } else {
7582 CaptureRegion = OMPD_unknown;
7585 OMPLexicalScope Scope(CGF, S, CaptureRegion);
7586 RCG(CGF);
7590 // Forward the provided action to the privatization codegen.
7591 RegionCodeGenTy PrivRCG(PrivCodeGen);
7592 PrivRCG.setAction(Action);
7594 // Although the body of the region is emitted as an inlined directive, we
7595 // don't use an inline scope, as changes to the references inside the
7596 // region are expected to be visible outside, so we do not privatize them.
7597 OMPLexicalScope Scope(CGF, S);
7598 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_target_data,
7599 PrivRCG);
7602 RegionCodeGenTy RCG(CodeGen);
7604 // If we don't have target devices, don't bother emitting the data mapping
7605 // code.
7606 if (CGM.getLangOpts().OMPTargetTriples.empty()) {
7607 RCG(*this);
7608 return;
7611 // Check if we have any if clause associated with the directive.
7612 const Expr *IfCond = nullptr;
7613 if (const auto *C = S.getSingleClause<OMPIfClause>())
7614 IfCond = C->getCondition();
7616 // Check if we have any device clause associated with the directive.
7617 const Expr *Device = nullptr;
7618 if (const auto *C = S.getSingleClause<OMPDeviceClause>())
7619 Device = C->getDevice();
7621 // Set the action to signal privatization of device pointers.
7622 RCG.setAction(PrivAction);
7624 // Emit region code.
7625 CGM.getOpenMPRuntime().emitTargetDataCalls(*this, S, IfCond, Device, RCG,
7626 Info);
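// Generate the instructions for '#pragma omp target enter data' directive,
// e.g. (illustrative) '#pragma omp target enter data map(to : a[0:n])'.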
7629 void CodeGenFunction::EmitOMPTargetEnterDataDirective(
7630 const OMPTargetEnterDataDirective &S) {
7631 // If we don't have target devices, don't bother emitting the data mapping
7632 // code.
7633 if (CGM.getLangOpts().OMPTargetTriples.empty())
7634 return;
7636 // Check if we have any if clause associated with the directive.
7637 const Expr *IfCond = nullptr;
7638 if (const auto *C = S.getSingleClause<OMPIfClause>())
7639 IfCond = C->getCondition();
7641 // Check if we have any device clause associated with the directive.
7642 const Expr *Device = nullptr;
7643 if (const auto *C = S.getSingleClause<OMPDeviceClause>())
7644 Device = C->getDevice();
7646 OMPLexicalScope Scope(*this, S, OMPD_task);
7647 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
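// Generate the instructions for '#pragma omp target exit data' directive,
// e.g. (illustrative) '#pragma omp target exit data map(from : a[0:n])'.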
7650 void CodeGenFunction::EmitOMPTargetExitDataDirective(
7651 const OMPTargetExitDataDirective &S) {
7652 // If we don't have target devices, don't bother emitting the data mapping
7653 // code.
7654 if (CGM.getLangOpts().OMPTargetTriples.empty())
7655 return;
7657 // Check if we have any if clause associated with the directive.
7658 const Expr *IfCond = nullptr;
7659 if (const auto *C = S.getSingleClause<OMPIfClause>())
7660 IfCond = C->getCondition();
7662 // Check if we have any device clause associated with the directive.
7663 const Expr *Device = nullptr;
7664 if (const auto *C = S.getSingleClause<OMPDeviceClause>())
7665 Device = C->getDevice();
7667 OMPLexicalScope Scope(*this, S, OMPD_task);
7668 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
7671 static void emitTargetParallelRegion(CodeGenFunction &CGF,
7672 const OMPTargetParallelDirective &S,
7673 PrePostActionTy &Action) {
7674 // Get the captured statement associated with the 'parallel' region.
7675 const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
7676 Action.Enter(CGF);
7677 auto &&CodeGen = [&S, CS](CodeGenFunction &CGF, PrePostActionTy &Action) {
7678 Action.Enter(CGF);
7679 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
7680 (void)CGF.EmitOMPFirstprivateClause(S, PrivateScope);
7681 CGF.EmitOMPPrivateClause(S, PrivateScope);
7682 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
7683 (void)PrivateScope.Privatize();
7684 if (isOpenMPTargetExecutionDirective(S.getDirectiveKind()))
7685 CGF.CGM.getOpenMPRuntime().adjustTargetSpecificDataForLambdas(CGF, S);
7686 // TODO: Add support for clauses.
7687 CGF.EmitStmt(CS->getCapturedStmt());
7688 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_parallel);
7690 emitCommonOMPParallelDirective(CGF, S, OMPD_parallel, CodeGen,
7691 emitEmptyBoundParameters);
7692 emitPostUpdateForReductionClause(CGF, S,
7693 [](CodeGenFunction &) { return nullptr; });
7696 void CodeGenFunction::EmitOMPTargetParallelDeviceFunction(
7697 CodeGenModule &CGM, StringRef ParentName,
7698 const OMPTargetParallelDirective &S) {
7699 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7700 emitTargetParallelRegion(CGF, S, Action);
7702 llvm::Function *Fn;
7703 llvm::Constant *Addr;
7704 // Emit target region as a standalone region.
7705 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
7706 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
7707 assert(Fn && Addr && "Target device function emission failed.");
7710 void CodeGenFunction::EmitOMPTargetParallelDirective(
7711 const OMPTargetParallelDirective &S) {
7712 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7713 emitTargetParallelRegion(CGF, S, Action);
7715 emitCommonOMPTargetDirective(*this, S, CodeGen);
7718 static void emitTargetParallelForRegion(CodeGenFunction &CGF,
7719 const OMPTargetParallelForDirective &S,
7720 PrePostActionTy &Action) {
7721 Action.Enter(CGF);
7722 // Emit the directive as a combined directive that consists of two implicit
7723 // directives: 'parallel' and 'for'.
7724 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7725 Action.Enter(CGF);
7726 CodeGenFunction::OMPCancelStackRAII CancelRegion(
7727 CGF, OMPD_target_parallel_for, S.hasCancel());
7728 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
7729 emitDispatchForLoopBounds);
7731 emitCommonOMPParallelDirective(CGF, S, OMPD_for, CodeGen,
7732 emitEmptyBoundParameters);
7735 void CodeGenFunction::EmitOMPTargetParallelForDeviceFunction(
7736 CodeGenModule &CGM, StringRef ParentName,
7737 const OMPTargetParallelForDirective &S) {
7738 // Emit SPMD target parallel for region as a standalone region.
7739 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7740 emitTargetParallelForRegion(CGF, S, Action);
7742 llvm::Function *Fn;
7743 llvm::Constant *Addr;
7744 // Emit target region as a standalone region.
7745 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
7746 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
7747 assert(Fn && Addr && "Target device function emission failed.");
7750 void CodeGenFunction::EmitOMPTargetParallelForDirective(
7751 const OMPTargetParallelForDirective &S) {
7752 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7753 emitTargetParallelForRegion(CGF, S, Action);
7755 emitCommonOMPTargetDirective(*this, S, CodeGen);
7758 static void
7759 emitTargetParallelForSimdRegion(CodeGenFunction &CGF,
7760 const OMPTargetParallelForSimdDirective &S,
7761 PrePostActionTy &Action) {
7762 Action.Enter(CGF);
7763 // Emit the directive as a combined directive that consists of two implicit
7764 // directives: 'parallel' and 'for'.
7765 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7766 Action.Enter(CGF);
7767 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
7768 emitDispatchForLoopBounds);
7770 emitCommonOMPParallelDirective(CGF, S, OMPD_simd, CodeGen,
7771 emitEmptyBoundParameters);
7774 void CodeGenFunction::EmitOMPTargetParallelForSimdDeviceFunction(
7775 CodeGenModule &CGM, StringRef ParentName,
7776 const OMPTargetParallelForSimdDirective &S) {
7777 // Emit SPMD target parallel for region as a standalone region.
7778 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7779 emitTargetParallelForSimdRegion(CGF, S, Action);
7781 llvm::Function *Fn;
7782 llvm::Constant *Addr;
7783 // Emit target region as a standalone region.
7784 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
7785 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
7786 assert(Fn && Addr && "Target device function emission failed.");
7789 void CodeGenFunction::EmitOMPTargetParallelForSimdDirective(
7790 const OMPTargetParallelForSimdDirective &S) {
7791 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7792 emitTargetParallelForSimdRegion(CGF, S, Action);
7794 emitCommonOMPTargetDirective(*this, S, CodeGen);
7797 /// Map a loop-helper variable to the matching outlined-function parameter.
7798 static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper,
7799 const ImplicitParamDecl *PVD,
7800 CodeGenFunction::OMPPrivateScope &Privates) {
7801 const auto *VDecl = cast<VarDecl>(Helper->getDecl());
7802 Privates.addPrivate(VDecl, CGF.GetAddrOfLocalVar(PVD));
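// Illustrative 'taskloop' input handled by the taskloop-based codegen below
// (example only):
//   #pragma omp taskloop grainsize(4) lastprivate(j)
//   for (int i = 0; i < n; ++i)
//     j = f(i);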
7805 void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
7806 assert(isOpenMPTaskLoopDirective(S.getDirectiveKind()));
7807 // Emit outlined function for task construct.
7808 const CapturedStmt *CS = S.getCapturedStmt(OMPD_taskloop);
7809 Address CapturedStruct = Address::invalid();
7811 OMPLexicalScope Scope(*this, S, OMPD_taskloop, /*EmitPreInitStmt=*/false);
7812 CapturedStruct = GenerateCapturedStmtArgument(*CS);
7814 QualType SharedsTy = getContext().getRecordType(CS->getCapturedRecordDecl());
7815 const Expr *IfCond = nullptr;
7816 for (const auto *C : S.getClausesOfKind<OMPIfClause>()) {
7817 if (C->getNameModifier() == OMPD_unknown ||
7818 C->getNameModifier() == OMPD_taskloop) {
7819 IfCond = C->getCondition();
7820 break;
7824 OMPTaskDataTy Data;
7825 // Check if taskloop must be emitted without taskgroup.
7826 Data.Nogroup = S.getSingleClause<OMPNogroupClause>();
7827 // TODO: Check if we should emit tied or untied task.
7828 Data.Tied = true;
7829 // Set scheduling for taskloop
7830 if (const auto *Clause = S.getSingleClause<OMPGrainsizeClause>()) {
7831 // grainsize clause
7832 Data.Schedule.setInt(/*IntVal=*/false);
7833 Data.Schedule.setPointer(EmitScalarExpr(Clause->getGrainsize()));
7834 } else if (const auto *Clause = S.getSingleClause<OMPNumTasksClause>()) {
7835 // num_tasks clause
7836 Data.Schedule.setInt(/*IntVal=*/true);
7837 Data.Schedule.setPointer(EmitScalarExpr(Clause->getNumTasks()));
7840 auto &&BodyGen = [CS, &S](CodeGenFunction &CGF, PrePostActionTy &) {
7841 // if (PreCond) {
7842 // for (IV in 0..LastIteration) BODY;
7843 // <Final counter/linear vars updates>;
7844 // }
7847 // Emit: if (PreCond) - begin.
7848 // If the condition constant folds and can be elided, avoid emitting the
7849 // whole loop.
7850 bool CondConstant;
7851 llvm::BasicBlock *ContBlock = nullptr;
7852 OMPLoopScope PreInitScope(CGF, S);
7853 if (CGF.ConstantFoldsToSimpleInteger(S.getPreCond(), CondConstant)) {
7854 if (!CondConstant)
7855 return;
7856 } else {
7857 llvm::BasicBlock *ThenBlock = CGF.createBasicBlock("taskloop.if.then");
7858 ContBlock = CGF.createBasicBlock("taskloop.if.end");
7859 emitPreCond(CGF, S, S.getPreCond(), ThenBlock, ContBlock,
7860 CGF.getProfileCount(&S));
7861 CGF.EmitBlock(ThenBlock);
7862 CGF.incrementProfileCounter(&S);
7865 (void)CGF.EmitOMPLinearClauseInit(S);
7867 OMPPrivateScope LoopScope(CGF);
7868 // Emit helper vars inits.
7869 enum { LowerBound = 5, UpperBound, Stride, LastIter };
7870 auto *I = CS->getCapturedDecl()->param_begin();
7871 auto *LBP = std::next(I, LowerBound);
7872 auto *UBP = std::next(I, UpperBound);
7873 auto *STP = std::next(I, Stride);
7874 auto *LIP = std::next(I, LastIter);
7875 mapParam(CGF, cast<DeclRefExpr>(S.getLowerBoundVariable()), *LBP,
7876 LoopScope);
7877 mapParam(CGF, cast<DeclRefExpr>(S.getUpperBoundVariable()), *UBP,
7878 LoopScope);
7879 mapParam(CGF, cast<DeclRefExpr>(S.getStrideVariable()), *STP, LoopScope);
7880 mapParam(CGF, cast<DeclRefExpr>(S.getIsLastIterVariable()), *LIP,
7881 LoopScope);
7882 CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
7883 CGF.EmitOMPLinearClause(S, LoopScope);
7884 bool HasLastprivateClause = CGF.EmitOMPLastprivateClauseInit(S, LoopScope);
7885 (void)LoopScope.Privatize();
7886 // Emit the loop iteration variable.
7887 const Expr *IVExpr = S.getIterationVariable();
7888 const auto *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
7889 CGF.EmitVarDecl(*IVDecl);
7890 CGF.EmitIgnoredExpr(S.getInit());
7892 // Emit the iterations count variable.
7893 // If it is not a variable, Sema decided to calculate the iteration count
7894 // on each iteration (e.g., it is foldable into a constant).
7895 if (const auto *LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
7896 CGF.EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
7897 // Emit calculation of the iterations count.
7898 CGF.EmitIgnoredExpr(S.getCalcLastIteration());
7902 OMPLexicalScope Scope(CGF, S, OMPD_taskloop, /*EmitPreInitStmt=*/false);
7903 emitCommonSimdLoop(
7904 CGF, S,
7905 [&S](CodeGenFunction &CGF, PrePostActionTy &) {
7906 if (isOpenMPSimdDirective(S.getDirectiveKind()))
7907 CGF.EmitOMPSimdInit(S);
7909 [&S, &LoopScope](CodeGenFunction &CGF, PrePostActionTy &) {
7910 CGF.EmitOMPInnerLoop(
7911 S, LoopScope.requiresCleanups(), S.getCond(), S.getInc(),
7912 [&S](CodeGenFunction &CGF) {
7913 emitOMPLoopBodyWithStopPoint(CGF, S,
7914 CodeGenFunction::JumpDest());
7916 [](CodeGenFunction &) {});
7919 // Emit: if (PreCond) - end.
7920 if (ContBlock) {
7921 CGF.EmitBranch(ContBlock);
7922 CGF.EmitBlock(ContBlock, true);
7924 // Emit final copy of the lastprivate variables if IsLastIter != 0.
7925 if (HasLastprivateClause) {
7926 CGF.EmitOMPLastprivateClauseFinal(
7927 S, isOpenMPSimdDirective(S.getDirectiveKind()),
7928 CGF.Builder.CreateIsNotNull(CGF.EmitLoadOfScalar(
7929 CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false,
7930 (*LIP)->getType(), S.getBeginLoc())));
7932 LoopScope.restoreMap();
7933 CGF.EmitOMPLinearClauseFinal(S, [LIP, &S](CodeGenFunction &CGF) {
7934 return CGF.Builder.CreateIsNotNull(
7935 CGF.EmitLoadOfScalar(CGF.GetAddrOfLocalVar(*LIP), /*Volatile=*/false,
7936 (*LIP)->getType(), S.getBeginLoc()));
7939 auto &&TaskGen = [&S, SharedsTy, CapturedStruct,
7940 IfCond](CodeGenFunction &CGF, llvm::Function *OutlinedFn,
7941 const OMPTaskDataTy &Data) {
7942 auto &&CodeGen = [&S, OutlinedFn, SharedsTy, CapturedStruct, IfCond,
7943 &Data](CodeGenFunction &CGF, PrePostActionTy &) {
7944 OMPLoopScope PreInitScope(CGF, S);
7945 CGF.CGM.getOpenMPRuntime().emitTaskLoopCall(CGF, S.getBeginLoc(), S,
7946 OutlinedFn, SharedsTy,
7947 CapturedStruct, IfCond, Data);
7949 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_taskloop,
7950 CodeGen);
7952 if (Data.Nogroup) {
7953 EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen, Data);
7954 } else {
7955 CGM.getOpenMPRuntime().emitTaskgroupRegion(
7956 *this,
7957 [&S, &BodyGen, &TaskGen, &Data](CodeGenFunction &CGF,
7958 PrePostActionTy &Action) {
7959 Action.Enter(CGF);
7960 CGF.EmitOMPTaskBasedDirective(S, OMPD_taskloop, BodyGen, TaskGen,
7961 Data);
7963 S.getBeginLoc());
7967 void CodeGenFunction::EmitOMPTaskLoopDirective(const OMPTaskLoopDirective &S) {
7968 auto LPCRegion =
7969 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
7970 EmitOMPTaskLoopBasedDirective(S);
7973 void CodeGenFunction::EmitOMPTaskLoopSimdDirective(
7974 const OMPTaskLoopSimdDirective &S) {
7975 auto LPCRegion =
7976 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
7977 OMPLexicalScope Scope(*this, S);
7978 EmitOMPTaskLoopBasedDirective(S);
7981 void CodeGenFunction::EmitOMPMasterTaskLoopDirective(
7982 const OMPMasterTaskLoopDirective &S) {
7983 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7984 Action.Enter(CGF);
7985 EmitOMPTaskLoopBasedDirective(S);
7987 auto LPCRegion =
7988 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
7989 OMPLexicalScope Scope(*this, S, std::nullopt, /*EmitPreInitStmt=*/false);
7990 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc());
7993 void CodeGenFunction::EmitOMPMasterTaskLoopSimdDirective(
7994 const OMPMasterTaskLoopSimdDirective &S) {
7995 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
7996 Action.Enter(CGF);
7997 EmitOMPTaskLoopBasedDirective(S);
7999 auto LPCRegion =
8000 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
8001 OMPLexicalScope Scope(*this, S);
8002 CGM.getOpenMPRuntime().emitMasterRegion(*this, CodeGen, S.getBeginLoc());
8005 void CodeGenFunction::EmitOMPParallelMasterTaskLoopDirective(
8006 const OMPParallelMasterTaskLoopDirective &S) {
8007 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
8008 auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF,
8009 PrePostActionTy &Action) {
8010 Action.Enter(CGF);
8011 CGF.EmitOMPTaskLoopBasedDirective(S);
8013 OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false);
8014 CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen,
8015 S.getBeginLoc());
8017 auto LPCRegion =
8018 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
8019 emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop, CodeGen,
8020 emitEmptyBoundParameters);
8023 void CodeGenFunction::EmitOMPParallelMasterTaskLoopSimdDirective(
8024 const OMPParallelMasterTaskLoopSimdDirective &S) {
8025 auto &&CodeGen = [this, &S](CodeGenFunction &CGF, PrePostActionTy &Action) {
8026 auto &&TaskLoopCodeGen = [&S](CodeGenFunction &CGF,
8027 PrePostActionTy &Action) {
8028 Action.Enter(CGF);
8029 CGF.EmitOMPTaskLoopBasedDirective(S);
8031 OMPLexicalScope Scope(CGF, S, OMPD_parallel, /*EmitPreInitStmt=*/false);
8032 CGM.getOpenMPRuntime().emitMasterRegion(CGF, TaskLoopCodeGen,
8033 S.getBeginLoc());
8035 auto LPCRegion =
8036 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
8037 emitCommonOMPParallelDirective(*this, S, OMPD_master_taskloop_simd, CodeGen,
8038 emitEmptyBoundParameters);
8041 // Generate the instructions for '#pragma omp target update' directive.
8042 void CodeGenFunction::EmitOMPTargetUpdateDirective(
8043 const OMPTargetUpdateDirective &S) {
8044 // If we don't have target devices, don't bother emitting the data mapping
8045 // code.
8046 if (CGM.getLangOpts().OMPTargetTriples.empty())
8047 return;
8049 // Check if we have any if clause associated with the directive.
8050 const Expr *IfCond = nullptr;
8051 if (const auto *C = S.getSingleClause<OMPIfClause>())
8052 IfCond = C->getCondition();
8054 // Check if we have any device clause associated with the directive.
8055 const Expr *Device = nullptr;
8056 if (const auto *C = S.getSingleClause<OMPDeviceClause>())
8057 Device = C->getDevice();
8059 OMPLexicalScope Scope(*this, S, OMPD_task);
8060 CGM.getOpenMPRuntime().emitTargetDataStandAloneCall(*this, S, IfCond, Device);
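// Illustrative: '#pragma omp loop bind(parallel)' is emitted like 'for',
// 'bind(teams)' like 'distribute', and 'bind(thread)' like 'simd' (see the
// switch below).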
8063 void CodeGenFunction::EmitOMPGenericLoopDirective(
8064 const OMPGenericLoopDirective &S) {
8065 // Always expect a bind clause on the loop directive. If it wasn't
8066 // in the source, it should have been added in Sema.
8068 OpenMPBindClauseKind BindKind = OMPC_BIND_unknown;
8069 if (const auto *C = S.getSingleClause<OMPBindClause>())
8070 BindKind = C->getBindKind();
8072 switch (BindKind) {
8073 case OMPC_BIND_parallel: // for
8074 return emitOMPForDirective(S, *this, CGM, /*HasCancel=*/false);
8075 case OMPC_BIND_teams: // distribute
8076 return emitOMPDistributeDirective(S, *this, CGM);
8077 case OMPC_BIND_thread: // simd
8078 return emitOMPSimdDirective(S, *this, CGM);
8079 case OMPC_BIND_unknown:
8080 break;
8083 // Unimplemented, just inline the underlying statement for now.
8084 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
8085 // Privatize loop counters when the for-loop init is not a declaration.
8086 const Stmt *CS =
8087 cast<CapturedStmt>(S.getAssociatedStmt())->getCapturedStmt();
8088 const auto *ForS = dyn_cast<ForStmt>(CS);
8089 if (ForS && !isa<DeclStmt>(ForS->getInit())) {
8090 OMPPrivateScope LoopScope(CGF);
8091 CGF.EmitOMPPrivateLoopCounters(S, LoopScope);
8092 (void)LoopScope.Privatize();
8093 CGF.EmitStmt(CS);
8094 LoopScope.restoreMap();
8095 } else {
8096 CGF.EmitStmt(CS);
8099 OMPLexicalScope Scope(*this, S, OMPD_unknown);
8100 CGM.getOpenMPRuntime().emitInlinedDirective(*this, OMPD_loop, CodeGen);
8103 void CodeGenFunction::EmitOMPParallelGenericLoopDirective(
8104 const OMPLoopDirective &S) {
8105 // Emit combined directive as if its constituent constructs are 'parallel'
8106 // and 'for'.
8107 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
8108 Action.Enter(CGF);
8109 emitOMPCopyinClause(CGF, S);
8110 (void)emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
8113 auto LPCRegion =
8114 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
8115 emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
8116 emitEmptyBoundParameters);
8118 // Check for outer lastprivate conditional update.
8119 checkForLastprivateConditionalUpdate(*this, S);
8122 void CodeGenFunction::EmitOMPTeamsGenericLoopDirective(
8123 const OMPTeamsGenericLoopDirective &S) {
8124 // To be consistent with current behavior of 'target teams loop', emit
8125 // 'teams loop' as if its constituent constructs are 'teams' and 'distribute'.
8126 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
8127 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
8130 // Emit teams region as a standalone region.
8131 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
8132 PrePostActionTy &Action) {
8133 Action.Enter(CGF);
8134 OMPPrivateScope PrivateScope(CGF);
8135 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
8136 (void)PrivateScope.Privatize();
8137 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(CGF, OMPD_distribute,
8138 CodeGenDistribute);
8139 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
8141 emitCommonOMPTeamsDirective(*this, S, OMPD_distribute, CodeGen);
8142 emitPostUpdateForReductionClause(*this, S,
8143 [](CodeGenFunction &) { return nullptr; });
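/// Debug-only helper reporting whether a 'target teams loop' construct is
/// code-generated as 'parallel for' or as 'distribute', and whether this is
/// the host or device pass (enable with -debug-only=target-teams-loop-codegen).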
8146 #ifndef NDEBUG
8147 static void emitTargetTeamsLoopCodegenStatus(CodeGenFunction &CGF,
8148 std::string StatusMsg,
8149 const OMPExecutableDirective &D) {
8150 bool IsDevice = CGF.CGM.getLangOpts().OpenMPIsTargetDevice;
8151 if (IsDevice)
8152 StatusMsg += ": DEVICE";
8153 else
8154 StatusMsg += ": HOST";
8155 SourceLocation L = D.getBeginLoc();
8156 auto &SM = CGF.getContext().getSourceManager();
8157 PresumedLoc PLoc = SM.getPresumedLoc(L);
8158 const char *FileName = PLoc.isValid() ? PLoc.getFilename() : "<unknown>";
8159 unsigned LineNo =
8160 PLoc.isValid() ? PLoc.getLine() : SM.getExpansionLineNumber(L);
8161 llvm::dbgs() << StatusMsg << ": " << FileName << ": " << LineNo << "\n";
8163 #endif
8165 static void emitTargetTeamsGenericLoopRegionAsParallel(
8166 CodeGenFunction &CGF, PrePostActionTy &Action,
8167 const OMPTargetTeamsGenericLoopDirective &S) {
8168 Action.Enter(CGF);
8169 // Emit 'teams loop' as if its constituent constructs are 'distribute',
8170 // 'parallel', and 'for'.
8171 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
8172 CGF.EmitOMPDistributeLoop(S, emitInnerParallelForWhenCombined,
8173 S.getDistInc());
8176 // Emit teams region as a standalone region.
8177 auto &&CodeGenTeams = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
8178 PrePostActionTy &Action) {
8179 Action.Enter(CGF);
8180 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
8181 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
8182 (void)PrivateScope.Privatize();
8183 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
8184 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
8185 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
8187 DEBUG_WITH_TYPE(TTL_CODEGEN_TYPE,
8188 emitTargetTeamsLoopCodegenStatus(
8189 CGF, TTL_CODEGEN_TYPE " as parallel for", S));
8190 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute_parallel_for,
8191 CodeGenTeams);
8192 emitPostUpdateForReductionClause(CGF, S,
8193 [](CodeGenFunction &) { return nullptr; });
8196 static void emitTargetTeamsGenericLoopRegionAsDistribute(
8197 CodeGenFunction &CGF, PrePostActionTy &Action,
8198 const OMPTargetTeamsGenericLoopDirective &S) {
8199 Action.Enter(CGF);
8200 // Emit 'teams loop' as if its constituent construct is 'distribute'.
8201 auto &&CodeGenDistribute = [&S](CodeGenFunction &CGF, PrePostActionTy &) {
8202 CGF.EmitOMPDistributeLoop(S, emitOMPLoopBodyWithStopPoint, S.getInc());
8205 // Emit teams region as a standalone region.
8206 auto &&CodeGen = [&S, &CodeGenDistribute](CodeGenFunction &CGF,
8207 PrePostActionTy &Action) {
8208 Action.Enter(CGF);
8209 CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
8210 CGF.EmitOMPReductionClauseInit(S, PrivateScope);
8211 (void)PrivateScope.Privatize();
8212 CGF.CGM.getOpenMPRuntime().emitInlinedDirective(
8213 CGF, OMPD_distribute, CodeGenDistribute, /*HasCancel=*/false);
8214 CGF.EmitOMPReductionClauseFinal(S, /*ReductionKind=*/OMPD_teams);
8216 DEBUG_WITH_TYPE(TTL_CODEGEN_TYPE,
8217 emitTargetTeamsLoopCodegenStatus(
8218 CGF, TTL_CODEGEN_TYPE " as distribute", S));
8219 emitCommonOMPTeamsDirective(CGF, S, OMPD_distribute, CodeGen);
8220 emitPostUpdateForReductionClause(CGF, S,
8221 [](CodeGenFunction &) { return nullptr; });
8224 void CodeGenFunction::EmitOMPTargetTeamsGenericLoopDirective(
8225 const OMPTargetTeamsGenericLoopDirective &S) {
8226 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
8227 if (S.canBeParallelFor())
8228 emitTargetTeamsGenericLoopRegionAsParallel(CGF, Action, S);
8229 else
8230 emitTargetTeamsGenericLoopRegionAsDistribute(CGF, Action, S);
8232 emitCommonOMPTargetDirective(*this, S, CodeGen);
8235 void CodeGenFunction::EmitOMPTargetTeamsGenericLoopDeviceFunction(
8236 CodeGenModule &CGM, StringRef ParentName,
8237 const OMPTargetTeamsGenericLoopDirective &S) {
8238 // Emit SPMD target parallel loop region as a standalone region.
8239 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
8240 if (S.canBeParallelFor())
8241 emitTargetTeamsGenericLoopRegionAsParallel(CGF, Action, S);
8242 else
8243 emitTargetTeamsGenericLoopRegionAsDistribute(CGF, Action, S);
8245 llvm::Function *Fn;
8246 llvm::Constant *Addr;
8247 // Emit target region as a standalone region.
8248 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
8249 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
8250 assert(Fn && Addr &&
8251 "Target device function emission failed for 'target teams loop'.");
8254 static void emitTargetParallelGenericLoopRegion(
8255 CodeGenFunction &CGF, const OMPTargetParallelGenericLoopDirective &S,
8256 PrePostActionTy &Action) {
8257 Action.Enter(CGF);
8258 // Emit as 'parallel for'.
8259 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
8260 Action.Enter(CGF);
8261 CodeGenFunction::OMPCancelStackRAII CancelRegion(
8262 CGF, OMPD_target_parallel_loop, /*hasCancel=*/false);
8263 CGF.EmitOMPWorksharingLoop(S, S.getEnsureUpperBound(), emitForLoopBounds,
8264 emitDispatchForLoopBounds);
8266 emitCommonOMPParallelDirective(CGF, S, OMPD_for, CodeGen,
8267 emitEmptyBoundParameters);
8270 void CodeGenFunction::EmitOMPTargetParallelGenericLoopDeviceFunction(
8271 CodeGenModule &CGM, StringRef ParentName,
8272 const OMPTargetParallelGenericLoopDirective &S) {
8273 // Emit target parallel loop region as a standalone region.
8274 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
8275 emitTargetParallelGenericLoopRegion(CGF, S, Action);
8277 llvm::Function *Fn;
8278 llvm::Constant *Addr;
8279 // Emit target region as a standalone region.
8280 CGM.getOpenMPRuntime().emitTargetOutlinedFunction(
8281 S, ParentName, Fn, Addr, /*IsOffloadEntry=*/true, CodeGen);
8282 assert(Fn && Addr && "Target device function emission failed.");
8285 /// Emit combined directive 'target parallel loop' as if its constituent
8286 /// constructs are 'target', 'parallel', and 'for'.
8287 void CodeGenFunction::EmitOMPTargetParallelGenericLoopDirective(
8288 const OMPTargetParallelGenericLoopDirective &S) {
8289 auto &&CodeGen = [&S](CodeGenFunction &CGF, PrePostActionTy &Action) {
8290 emitTargetParallelGenericLoopRegion(CGF, S, Action);
8292 emitCommonOMPTargetDirective(*this, S, CodeGen);
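/// Emit a directive that needs no special outlining: 'scan' is dispatched to
/// its own emitter; 'atomic', 'critical', 'section', 'master', 'masked',
/// 'unroll', and 'assume' just emit their associated statement; everything
/// else is emitted as an inlined region.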
8295 void CodeGenFunction::EmitSimpleOMPExecutableDirective(
8296 const OMPExecutableDirective &D) {
8297 if (const auto *SD = dyn_cast<OMPScanDirective>(&D)) {
8298 EmitOMPScanDirective(*SD);
8299 return;
8301 if (!D.hasAssociatedStmt() || !D.getAssociatedStmt())
8302 return;
8303 auto &&CodeGen = [&D](CodeGenFunction &CGF, PrePostActionTy &Action) {
8304 OMPPrivateScope GlobalsScope(CGF);
8305 if (isOpenMPTaskingDirective(D.getDirectiveKind())) {
8306 // Capture global firstprivates to avoid crash.
8307 for (const auto *C : D.getClausesOfKind<OMPFirstprivateClause>()) {
8308 for (const Expr *Ref : C->varlist()) {
8309 const auto *DRE = dyn_cast<DeclRefExpr>(Ref->IgnoreParenImpCasts());
8310 if (!DRE)
8311 continue;
8312 const auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
8313 if (!VD || VD->hasLocalStorage())
8314 continue;
8315 if (!CGF.LocalDeclMap.count(VD)) {
8316 LValue GlobLVal = CGF.EmitLValue(Ref);
8317 GlobalsScope.addPrivate(VD, GlobLVal.getAddress());
8322 if (isOpenMPSimdDirective(D.getDirectiveKind())) {
8323 (void)GlobalsScope.Privatize();
8324 ParentLoopDirectiveForScanRegion ScanRegion(CGF, D);
8325 emitOMPSimdRegion(CGF, cast<OMPLoopDirective>(D), Action);
8326 } else {
8327 if (const auto *LD = dyn_cast<OMPLoopDirective>(&D)) {
8328 for (const Expr *E : LD->counters()) {
8329 const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
8330 if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) {
8331 LValue GlobLVal = CGF.EmitLValue(E);
8332 GlobalsScope.addPrivate(VD, GlobLVal.getAddress());
8334 if (isa<OMPCapturedExprDecl>(VD)) {
8335 // Emit only those that were not explicitly referenced in clauses.
8336 if (!CGF.LocalDeclMap.count(VD))
8337 CGF.EmitVarDecl(*VD);
8340 for (const auto *C : D.getClausesOfKind<OMPOrderedClause>()) {
8341 if (!C->getNumForLoops())
8342 continue;
8343 for (unsigned I = LD->getLoopsNumber(),
8344 E = C->getLoopNumIterations().size();
8345 I < E; ++I) {
8346 if (const auto *VD = dyn_cast<OMPCapturedExprDecl>(
8347 cast<DeclRefExpr>(C->getLoopCounter(I))->getDecl())) {
8348 // Emit only those that were not explicitly referenced in clauses.
8349 if (!CGF.LocalDeclMap.count(VD))
8350 CGF.EmitVarDecl(*VD);
8355 (void)GlobalsScope.Privatize();
8356 CGF.EmitStmt(D.getInnermostCapturedStmt()->getCapturedStmt());
8359 if (D.getDirectiveKind() == OMPD_atomic ||
8360 D.getDirectiveKind() == OMPD_critical ||
8361 D.getDirectiveKind() == OMPD_section ||
8362 D.getDirectiveKind() == OMPD_master ||
8363 D.getDirectiveKind() == OMPD_masked ||
8364 D.getDirectiveKind() == OMPD_unroll ||
8365 D.getDirectiveKind() == OMPD_assume) {
8366 EmitStmt(D.getAssociatedStmt());
8367 } else {
8368 auto LPCRegion =
8369 CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, D);
8370 OMPSimdLexicalScope Scope(*this, D);
8371 CGM.getOpenMPRuntime().emitInlinedDirective(
8372 *this,
8373 isOpenMPSimdDirective(D.getDirectiveKind()) ? OMPD_simd
8374 : D.getDirectiveKind(),
8375 CodeGen);
8377 // Check for outer lastprivate conditional update.
8378 checkForLastprivateConditionalUpdate(*this, D);
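// 'omp assume' has no code generation of its own; only its associated
// statement is emitted.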
8381 void CodeGenFunction::EmitOMPAssumeDirective(const OMPAssumeDirective &S) {
8382 EmitStmt(S.getAssociatedStmt());