//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGBlocks.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGHLSLRuntime.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/FPEnv.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CRC.h"
#include "llvm/Support/xxhash.h"
#include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <optional>

using namespace clang;
using namespace CodeGen;

/// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
                                      const LangOptions &LangOpts) {
  if (CGOpts.DisableLifetimeMarkers)
    return false;

  // Sanitizers may use markers.
  if (CGOpts.SanitizeAddressUseAfterScope ||
      LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
      LangOpts.Sanitize.has(SanitizerKind::Memory))
    return true;

  // For now, only in optimized builds.
  return CGOpts.OptimizationLevel != 0;
}
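
// In IR, lifetime markers are the llvm.lifetime.start/llvm.lifetime.end
// intrinsics emitted around local allocas to delimit a variable's live range.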

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()),
      DebugInfo(CGM.getModuleDebugInfo()), PGO(cgm),
      ShouldEmitLifetimeMarkers(
          shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();
  EHStack.setCGF(this);

  SetFastMathFlags(CurFPFeatures);
}

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  if (getLangOpts().OpenMP && CurFn)
    CGM.getOpenMPRuntime().functionFinished(*this);

  // If we have an OpenMPIRBuilder we want to finalize functions (incl.
  // outlining etc) at some point. Doing it once the function codegen is done
  // seems to be a reasonable spot. We do it here, as opposed to the deletion
  // time of the CodeGenModule, because we have to ensure the IR has not yet
  // been "emitted" to the outside, thus, modifications are still sensible.
  if (CGM.getLangOpts().OpenMPIRBuilder && CurFn)
    CGM.getOpenMPRuntime().getOMPBuilder().finalize(CurFn);
}

// Map the LangOption for exception behavior into
// the corresponding enum in the IR.
llvm::fp::ExceptionBehavior
clang::ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) {

  switch (Kind) {
  case LangOptions::FPE_Ignore:  return llvm::fp::ebIgnore;
  case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
  case LangOptions::FPE_Strict:  return llvm::fp::ebStrict;
  default:
    llvm_unreachable("Unsupported FP Exception Behavior");
  }
}
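
// SetFastMathFlags mirrors the semantic FP options onto the IRBuilder; the
// resulting fast-math flags become the defaults for floating-point
// instructions created afterwards (already-created instructions keep theirs).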
void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {
  llvm::FastMathFlags FMF;
  FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate());
  FMF.setNoNaNs(FPFeatures.getNoHonorNaNs());
  FMF.setNoInfs(FPFeatures.getNoHonorInfs());
  FMF.setNoSignedZeros(FPFeatures.getNoSignedZero());
  FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal());
  FMF.setApproxFunc(FPFeatures.getAllowApproxFunc());
  FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
  Builder.setFastMathFlags(FMF);
}
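
// CGFPOptionsRAII scopes a change of floating-point settings. A sketch of the
// usage pattern (names illustrative):
//   {
//     CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
//     ... emit FP instructions under E's pragma-adjusted settings ...
//   } // previous FPOptions, rounding, and exception behavior are restored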
CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                  const Expr *E)
    : CGF(CGF) {
  ConstructorHelper(E->getFPFeaturesInEffect(CGF.getLangOpts()));
}

CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
                                                  FPOptions FPFeatures)
    : CGF(CGF) {
  ConstructorHelper(FPFeatures);
}

void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
  OldFPFeatures = CGF.CurFPFeatures;
  CGF.CurFPFeatures = FPFeatures;

  OldExcept = CGF.Builder.getDefaultConstrainedExcept();
  OldRounding = CGF.Builder.getDefaultConstrainedRounding();

  if (OldFPFeatures == FPFeatures)
    return;

  FMFGuard.emplace(CGF.Builder);

  llvm::RoundingMode NewRoundingBehavior = FPFeatures.getRoundingMode();
  CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
  auto NewExceptionBehavior =
      ToConstrainedExceptMD(static_cast<LangOptions::FPExceptionModeKind>(
          FPFeatures.getExceptionMode()));
  CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);

  CGF.SetFastMathFlags(FPFeatures);

  assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() ||
          isa<CXXConstructorDecl>(CGF.CurFuncDecl) ||
          isa<CXXDestructorDecl>(CGF.CurFuncDecl) ||
          (NewExceptionBehavior == llvm::fp::ebIgnore &&
           NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
         "FPConstrained should be enabled on entire function");

  auto mergeFnAttrValue = [&](StringRef Name, bool Value) {
    auto OldValue =
        CGF.CurFn->getFnAttribute(Name).getValueAsBool();
    auto NewValue = OldValue & Value;
    if (OldValue != NewValue)
      CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue));
  };
  mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs());
  mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs());
  mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero());
  mergeFnAttrValue(
      "unsafe-fp-math",
      FPFeatures.getAllowFPReassociate() && FPFeatures.getAllowReciprocal() &&
          FPFeatures.getAllowApproxFunc() && FPFeatures.getNoSignedZero() &&
          FPFeatures.allowFPContractAcrossStatement());
}

CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
  CGF.CurFPFeatures = OldFPFeatures;
  CGF.Builder.setDefaultConstrainedExcept(OldExcept);
  CGF.Builder.setDefaultConstrainedRounding(OldRounding);
}
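
// "Natural" alignment below is the alignment CodeGenModule computes for the
// type itself (getNaturalTypeAlignment), returned together with the
// base-class and TBAA information describing the access.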
LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
  Address Addr(V, ConvertTypeForMem(T), Alignment);
  return LValue::MakeAddr(Addr, T, getContext(), BaseInfo, TBAAInfo);
}

/// Given a value of type T* that may not point to a complete object,
/// construct an l-value with the natural pointee alignment of T.
LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
                                                /* forPointeeType= */ true);
  Address Addr(V, ConvertTypeForMem(T), Align);
  return MakeAddrLValue(Addr, T, BaseInfo, TBAAInfo);
}

llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.inc"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::ConstantMatrix:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
    case Type::BitInt:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}
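
// The evaluation kind drives expression emission: TEK_Scalar values live in a
// single LLVM value, TEK_Complex values in a (real, imag) pair, and
// TEK_Aggregate values in a memory slot.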

llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
        dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be
      // used later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      ReturnBlock = JumpDest();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty()) {
    CGF.CurFn->insert(CGF.CurFn->end(), BB);
    return;
  }
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
    && NumSimpleReturnExprs == NumReturnExprs
    && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code. If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement. Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters. Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  bool HasOnlyLifetimeMarkers =
      HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;

  std::optional<ApplyDebugLocation> OAL;
  if (HasCleanups) {
    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    if (CGDebugInfo *DI = getDebugInfo()) {
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);
      else
        // We may not have a valid end location. Try to apply it anyway, and
        // fall back to an artificial location if needed.
        OAL = ApplyDebugLocation::CreateDefaultArtificial(*this, EndLoc);
    }

    PopCleanupBlocks(PrologueCleanupDepth);
  }

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-exit-inlined",
                       "__cyg_profile_func_exit");
  }

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder, CurFn);

  // Reset the debug location to that of the simple 'return' expression, if
  // any, rather than that of the end of the function's scope '}'.
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If some of our locals escaped, insert a call to llvm.localescape in the
  // entry block.
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should be
    // no holes.
    SmallVector<llvm::Value *, 4> EscapeArgs;
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localescape);
    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // PostAllocaInsertPt, if created, was lazily created when it was required;
  // remove it now since it was just created for our own convenience.
  if (PostAllocaInsertPt) {
    llvm::Instruction *PostPtr = PostAllocaInsertPt;
    PostAllocaInsertPt = nullptr;
    PostPtr->eraseFromParent();
  }

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  for (const auto &FuncletAndParent : TerminateFunclets)
    EmitIfUsed(*this, FuncletAndParent.second);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (const auto &R : DeferredReplacements) {
    if (llvm::Value *Old = R.first) {
      Old->replaceAllUsesWith(R.second);
      cast<llvm::Instruction>(Old)->eraseFromParent();
    }
  }
  DeferredReplacements.clear();

  // Eliminate CleanupDestSlot alloca by replacing it with SSA values and
  // PHIs if the current function is a coroutine. We don't do it for all
  // functions as it may result in a slight increase in the number of
  // instructions if compiled with no optimizations. We do it for coroutines
  // as the lifetime of the CleanupDestSlot alloca makes correct coroutine
  // frame building very difficult.
  if (NormalCleanupDest.isValid() && isCoroutine()) {
    llvm::DominatorTree DT(*CurFn);
    llvm::PromoteMemToReg(
        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
    NormalCleanupDest = Address::invalid();
  }

  // Scan function arguments for vector width.
  for (llvm::Argument &A : CurFn->args())
    if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinValue());

  // Update vector width based on return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
    LargestVectorWidth =
        std::max((uint64_t)LargestVectorWidth,
                 VT->getPrimitiveSizeInBits().getKnownMinValue());

  if (CurFnInfo->getMaxVectorWidth() > LargestVectorWidth)
    LargestVectorWidth = CurFnInfo->getMaxVectorWidth();

  // Add the min-legal-vector-width attribute. This contains the max width from:
  // 1. min-vector-width attribute used in the source program.
  // 2. Any builtins used that have a vector width specified.
  // 3. Values passed in and out of inline assembly.
  // 4. Width of vector arguments and return types for this function.
  // 5. Width of vector arguments and return types for functions called by this
  //    function.
  if (getContext().getTargetInfo().getTriple().isX86())
    CurFn->addFnAttr("min-legal-vector-width",
                     llvm::utostr(LargestVectorWidth));

  // Add vscale_range attribute if appropriate.
  std::optional<std::pair<unsigned, unsigned>> VScaleRange =
      getContext().getTargetInfo().getVScaleRange(getLangOpts());
  if (VScaleRange) {
    CurFn->addFnAttr(llvm::Attribute::getWithVScaleRangeArgs(
        getLLVMContext(), VScaleRange->first, VScaleRange->second));
  }

  // If we generated an unreachable return block, delete it now.
  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
    Builder.ClearInsertionPoint();
    ReturnBlock.getBlock()->eraseFromParent();
  }
  if (ReturnValue.isValid()) {
    auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
    if (RetAlloca && RetAlloca->use_empty()) {
      RetAlloca->eraseFromParent();
      ReturnValue = Address::invalid();
    }
  }
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

bool CodeGenFunction::ShouldSkipSanitizerInstrumentation() {
  if (!CurFuncDecl)
    return false;
  return CurFuncDecl->hasAttr<DisableSanitizerInstrumentationAttr>();
}

/// ShouldXRayInstrument - Return true if the current function should be
/// instrumented with XRay nop sleds.
bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
}

/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
/// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Custom);
}

bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Typed);
}

llvm::ConstantInt *
CodeGenFunction::getUBSanFunctionTypeHash(QualType Ty) const {
  // Remove any (C++17) exception specifications, to allow calling e.g. a
  // noexcept function through a non-noexcept pointer.
  if (!isa<FunctionNoProtoType>(Ty))
    Ty = getContext().getFunctionTypeWithExceptionSpec(Ty, EST_None);
  std::string Mangled;
  llvm::raw_string_ostream Out(Mangled);
  CGM.getCXXABI().getMangleContext().mangleTypeName(Ty, Out, false);
  return llvm::ConstantInt::get(CGM.Int32Ty,
                                static_cast<uint32_t>(llvm::xxHash64(Mangled)));
}
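
// The 32-bit hash above ends up in the function's !func_sanitize metadata;
// -fsanitize=function compares it against the callee's hash at indirect call
// sites to detect calls through a mismatched function pointer type.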

void CodeGenFunction::EmitKernelMetadata(const FunctionDecl *FD,
                                         llvm::Function *Fn) {
  if (!FD->hasAttr<OpenCLKernelAttr>() && !FD->hasAttr<CUDAGlobalAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  CGM.GenKernelArgMetadata(Fn, FD, this);

  if (!getLangOpts().OpenCL)
    return;

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType HintQTy = A->getTypeHint();
    const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
    bool IsSignedInteger =
        HintQTy->isSignedIntegerType() ||
        (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("work_group_size_hint",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("reqd_work_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
          FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
    Fn->setMetadata("intel_reqd_sub_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }
}

/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
  if (SanOpts.has(SanitizerKind::Thread)) {
    Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
    Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
  }
}

/// Check if the return value of this function requires sanitization.
bool CodeGenFunction::requiresReturnValueCheck() const {
  return requiresReturnValueNullabilityCheck() ||
         (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl &&
          CurCodeDecl->getAttr<ReturnsNonNullAttr>());
}

static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
    return false;

  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
    return false;

  if (MD->getNumParams() == 2) {
    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
    if (!PT || !PT->isVoidPointerType() ||
        !PT->getPointeeType().isConstQualified())
      return false;
  }

  return true;
}
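
// Illustrative declarations that the check above accepts (the first parameter
// must be the target's size type, the optional second a const void*):
//   T *allocate(size_t n);
//   T *allocate(size_t n, const void *hint);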

/// Return the UBSan prologue signature for \p FD if one is available.
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
                                            const FunctionDecl *FD) {
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isStatic())
      return nullptr;
  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
}

void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  assert(!CurFn &&
         "Do not use a CodeGenFunction object for more than one function");

  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (FD && FD->usesSEHTry())
    CurSEHParent = GD;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // If this function is ignored for any of the enabled sanitizers,
  // disable the sanitizer for the function.
  do {
#define SANITIZER(NAME, ID)                                                    \
  if (SanOpts.empty())                                                         \
    break;                                                                     \
  if (SanOpts.has(SanitizerKind::ID))                                          \
    if (CGM.isInNoSanitizeList(SanitizerKind::ID, Fn, Loc))                    \
      SanOpts.set(SanitizerKind::ID, false);

#include "clang/Basic/Sanitizers.def"
#undef SANITIZER
  } while (false);

  if (D) {
    const bool SanitizeBounds = SanOpts.hasOneOf(SanitizerKind::Bounds);
    SanitizerMask no_sanitize_mask;
    bool NoSanitizeCoverage = false;

    for (auto *Attr : D->specific_attrs<NoSanitizeAttr>()) {
      no_sanitize_mask |= Attr->getMask();
      // SanitizeCoverage is not handled by SanOpts.
      if (Attr->hasCoverage())
        NoSanitizeCoverage = true;
    }

    // Apply the no_sanitize* attributes to SanOpts.
    SanOpts.Mask &= ~no_sanitize_mask;
    if (no_sanitize_mask & SanitizerKind::Address)
      SanOpts.set(SanitizerKind::KernelAddress, false);
    if (no_sanitize_mask & SanitizerKind::KernelAddress)
      SanOpts.set(SanitizerKind::Address, false);
    if (no_sanitize_mask & SanitizerKind::HWAddress)
      SanOpts.set(SanitizerKind::KernelHWAddress, false);
    if (no_sanitize_mask & SanitizerKind::KernelHWAddress)
      SanOpts.set(SanitizerKind::HWAddress, false);

    if (SanitizeBounds && !SanOpts.hasOneOf(SanitizerKind::Bounds))
      Fn->addFnAttr(llvm::Attribute::NoSanitizeBounds);

    if (NoSanitizeCoverage && CGM.getCodeGenOpts().hasSanitizeCoverage())
      Fn->addFnAttr(llvm::Attribute::NoSanitizeCoverage);

    // Some passes need the non-negated no_sanitize attribute. Pass them on.
    if (CGM.getCodeGenOpts().hasSanitizeBinaryMetadata()) {
      if (no_sanitize_mask & SanitizerKind::Thread)
        Fn->addFnAttr("no_sanitize_thread");
    }
  }

  if (ShouldSkipSanitizerInstrumentation()) {
    CurFn->addFnAttr(llvm::Attribute::DisableSanitizerInstrumentation);
  } else {
    // Apply sanitizer attributes to the function.
    if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
      Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
    if (SanOpts.hasOneOf(SanitizerKind::HWAddress |
                         SanitizerKind::KernelHWAddress))
      Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
    if (SanOpts.has(SanitizerKind::MemtagStack))
      Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
    if (SanOpts.has(SanitizerKind::Thread))
      Fn->addFnAttr(llvm::Attribute::SanitizeThread);
    if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
      Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
  }
  if (SanOpts.has(SanitizerKind::SafeStack))
    Fn->addFnAttr(llvm::Attribute::SafeStack);
  if (SanOpts.has(SanitizerKind::ShadowCallStack))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  // Apply fuzzing attribute to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
    Fn->addFnAttr(llvm::Attribute::OptForFuzzing);

  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run
  // time.
  if (SanOpts.has(SanitizerKind::Thread)) {
    if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
      IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
      if (OMD->getMethodFamily() == OMF_dealloc ||
          OMD->getMethodFamily() == OMF_initialize ||
          (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
        markAsIgnoreThreadCheckingAtRuntime(Fn);
      }
    }
  }

  // Ignore unrelated casts in STL allocate() since the allocator must cast
  // from void* to T* before object initialization completes. Don't match on
  // the namespace because not all allocators are in std::.
  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
    if (matchesStlAllocatorFn(D, getContext()))
      SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
  }

  // Ignore null checks in coroutine functions since the coroutines passes
  // are not aware of how to move the extra UBSan instructions across the split
  // coroutine boundaries.
  if (D && SanOpts.has(SanitizerKind::Null))
    if (FD && FD->getBody() &&
        FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
      SanOpts.Mask &= ~SanitizerKind::Null;

  // Apply xray attributes to the function (as a string, for now)
  bool AlwaysXRayAttr = false;
  if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
    if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionEntry) ||
        CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionExit)) {
      if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction()) {
        Fn->addFnAttr("function-instrument", "xray-always");
        AlwaysXRayAttr = true;
      }
      if (XRayAttr->neverXRayInstrument())
        Fn->addFnAttr("function-instrument", "xray-never");
      if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
        if (ShouldXRayInstrumentFunction())
          Fn->addFnAttr("xray-log-args",
                        llvm::utostr(LogArgs->getArgumentCount()));
    }
  } else {
    if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
      Fn->addFnAttr(
          "xray-instruction-threshold",
          llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
  }

  if (ShouldXRayInstrumentFunction()) {
    if (CGM.getCodeGenOpts().XRayIgnoreLoops)
      Fn->addFnAttr("xray-ignore-loops");

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionExit))
      Fn->addFnAttr("xray-skip-exit");

    if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
            XRayInstrKind::FunctionEntry))
      Fn->addFnAttr("xray-skip-entry");
    auto FuncGroups = CGM.getCodeGenOpts().XRayTotalFunctionGroups;
    if (FuncGroups > 1) {
      auto FuncName = llvm::ArrayRef<uint8_t>(CurFn->getName().bytes_begin(),
                                              CurFn->getName().bytes_end());
      auto Group = crc32(FuncName) % FuncGroups;
      if (Group != CGM.getCodeGenOpts().XRaySelectedFunctionGroup &&
          !AlwaysXRayAttr)
        Fn->addFnAttr("function-instrument", "xray-never");
    }
  }

  if (CGM.getCodeGenOpts().getProfileInstr() != CodeGenOptions::ProfileNone) {
    switch (CGM.isFunctionBlockedFromProfileInstr(Fn, Loc)) {
    case ProfileList::Skip:
      Fn->addFnAttr(llvm::Attribute::SkipProfile);
      break;
    case ProfileList::Forbid:
      Fn->addFnAttr(llvm::Attribute::NoProfile);
      break;
    case ProfileList::Allow:
      break;
    }
  }

  unsigned Count, Offset;
  if (const auto *Attr =
          D ? D->getAttr<PatchableFunctionEntryAttr>() : nullptr) {
    Count = Attr->getCount();
    Offset = Attr->getOffset();
  } else {
    Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
    Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
  }
  if (Count && Offset <= Count) {
    Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
    if (Offset)
      Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
  }

  // Instruct that functions for COFF/CodeView targets should start with a
  // patchable instruction, but only on x86/x64. Don't forward this to ARM/ARM64
  // backends as they don't need it -- instructions on these architectures are
  // always atomically patchable at runtime.
  if (CGM.getCodeGenOpts().HotPatch &&
      getContext().getTargetInfo().getTriple().isX86() &&
      getContext().getTargetInfo().getTriple().getEnvironment() !=
          llvm::Triple::CODE16)
    Fn->addFnAttr("patchable-function", "prologue-short-redirect");

  // Add no-jump-tables value.
  if (CGM.getCodeGenOpts().NoUseJumpTables)
    Fn->addFnAttr("no-jump-tables", "true");

  // Add no-inline-line-tables value.
  if (CGM.getCodeGenOpts().NoInlineLineTables)
    Fn->addFnAttr("no-inline-line-tables");

  // Add profile-sample-accurate value.
  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
    Fn->addFnAttr("profile-sample-accurate");

  if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
    Fn->addFnAttr("use-sample-profile");

  if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
    Fn->addFnAttr("cfi-canonical-jump-table");

  if (D && D->hasAttr<NoProfileFunctionAttr>())
    Fn->addFnAttr(llvm::Attribute::NoProfile);

  if (D) {
    // Function attributes take precedence over command line flags.
    if (auto *A = D->getAttr<FunctionReturnThunksAttr>()) {
      switch (A->getThunkType()) {
      case FunctionReturnThunksAttr::Kind::Keep:
        break;
      case FunctionReturnThunksAttr::Kind::Extern:
        Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
        break;
      }
    } else if (CGM.getCodeGenOpts().FunctionReturnThunks)
      Fn->addFnAttr(llvm::Attribute::FnRetThunkExtern);
  }

  if (FD && (getLangOpts().OpenCL ||
             (getLangOpts().HIP && getLangOpts().CUDAIsDevice))) {
    // Add metadata for a kernel function.
    EmitKernelMetadata(FD, Fn);
  }

  // If we are checking function types, emit a function type signature as
  // prologue data.
  if (FD && SanOpts.has(SanitizerKind::Function)) {
    if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
      llvm::LLVMContext &Ctx = Fn->getContext();
      llvm::MDBuilder MDB(Ctx);
      Fn->setMetadata(
          llvm::LLVMContext::MD_func_sanitize,
          MDB.createRTTIPointerPrologue(
              PrologueSig, getUBSanFunctionTypeHash(FD->getType())));
    }
  }

  // If we're checking nullability, we need to know whether we can check the
  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
    auto Nullability = FnRetTy->getNullability();
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
            CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
        RetValNullabilityPrecondition =
            llvm::ConstantInt::getTrue(getLLVMContext());
    }
  }

  // If we're in C++ mode and the function name is "main", it is guaranteed
  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
  // used within a program").
  //
  // OpenCL C 2.0 v2.2-11 s6.9.i:
  //     Recursion is not supported.
  //
  // SYCL v1.2.1 s3.10:
  //     kernels cannot include RTTI information, exception classes,
  //     recursive code, virtual functions or make use of C++ libraries that
  //     are not compiled for the device.
  if (FD && ((getLangOpts().CPlusPlus && FD->isMain()) ||
             getLangOpts().OpenCL || getLangOpts().SYCLIsDevice ||
             (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>())))
    Fn->addFnAttr(llvm::Attribute::NoRecurse);

  llvm::RoundingMode RM = getLangOpts().getDefaultRoundingMode();
  llvm::fp::ExceptionBehavior FPExceptionBehavior =
      ToConstrainedExceptMD(getLangOpts().getDefaultExceptionMode());
  Builder.setDefaultConstrainedRounding(RM);
  Builder.setDefaultConstrainedExcept(FPExceptionBehavior);
  if ((FD && (FD->UsesFPIntrin() || FD->hasAttr<StrictFPAttr>())) ||
      (!FD && (FPExceptionBehavior != llvm::fp::ebIgnore ||
               RM != llvm::RoundingMode::NearestTiesToEven))) {
    Builder.setIsFPConstrained(true);
    Fn->addFnAttr(llvm::Attribute::StrictFP);
  }

  // If a custom alignment is used, force realigning to this alignment on
  // any main function which certainly will need it.
  if (FD && ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
             CGM.getCodeGenOpts().StackAlignment))
    Fn->addFnAttr("stackrealign");

  // "main" doesn't need to zero out call-used registers.
  if (FD && FD->isMain())
    Fn->removeFnAttr("zero-call-used-regs");

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later. Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // If we're checking the return value, allocate space for a pointer to a
  // precise source location of the checked return statement.
  if (requiresReturnValueCheck()) {
    ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
    Builder.CreateStore(llvm::ConstantPointerNull::get(Int8PtrTy),
                        ReturnLocation);
  }

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // Reconstruct the type from the argument list so that implicit parameters,
    // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
    // convention.
    DI->emitFunctionStart(GD, Loc, StartLoc,
                          DI->getFunctionType(FD, RetTy, Args), CurFn,
                          CurFuncIsThunk);
  }

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter_bare");
  }

  // Since emitting the mcount call here impacts optimizations such as function
  // inlining, we just add an attribute to insert an mcount call in the backend.
  // The attribute "counting-function" is set to the mcount function name,
  // which is architecture dependent.
  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
    // Calls to fentry/mcount should not be generated if function has
    // the no_instrument_function attribute.
    if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
      if (CGM.getCodeGenOpts().CallFEntry)
        Fn->addFnAttr("fentry-call", "true");
      else {
        Fn->addFnAttr("instrument-function-entry-inlined",
                      getTarget().getMCountName());
      }
      if (CGM.getCodeGenOpts().MNopMCount) {
        if (!CGM.getCodeGenOpts().CallFEntry)
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
              << "-mnop-mcount" << "-mfentry";
        Fn->addFnAttr("mnop-mcount");
      }

      if (CGM.getCodeGenOpts().RecordMCount) {
        if (!CGM.getCodeGenOpts().CallFEntry)
          CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
              << "-mrecord-mcount" << "-mfentry";
        Fn->addFnAttr("mrecord-mcount");
      }
    }
  }

  if (CGM.getCodeGenOpts().PackedStack) {
    if (getContext().getTargetInfo().getTriple().getArch() !=
        llvm::Triple::systemz)
      CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
          << "-mpacked-stack";
    Fn->addFnAttr("packed-stack");
  }

  if (CGM.getCodeGenOpts().WarnStackSize != UINT_MAX &&
      !CGM.getDiags().isIgnored(diag::warn_fe_backend_frame_larger_than, Loc))
    Fn->addFnAttr("warn-stack-size",
                  std::to_string(CGM.getCodeGenOpts().WarnStackSize));

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = Address::invalid();

    // Count the implicit return.
    if (!endsWithReturn(D))
      ++NumReturnExprs;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
    // Indirect return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    auto AI = CurFn->arg_begin();
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
      ++AI;
    ReturnValue =
        Address(&*AI, ConvertType(RetTy),
                CurFnInfo->getReturnInfo().getIndirectAlign(), KnownNonNull);
    if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
      ReturnValuePointer =
          CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
      Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast(
                              ReturnValue.getPointer(), Int8PtrTy),
                          ReturnValuePointer);
    }
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Load the sret pointer from the argument struct and return into that.
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
    llvm::Function::arg_iterator EI = CurFn->arg_end();
    --EI;
    llvm::Value *Addr = Builder.CreateStructGEP(
        CurFnInfo->getArgStruct(), &*EI, Idx);
    llvm::Type *Ty =
        cast<llvm::GetElementPtrInst>(Addr)->getResultElementType();
    ReturnValuePointer = Address(Addr, Ty, getPointerAlign());
    Addr = Builder.CreateAlignedLoad(Ty, Addr, getPointerAlign(), "agg.result");
    ReturnValue = Address(Addr, ConvertType(RetTy),
                          CGM.getNaturalTypeAlignment(RetTy), KnownNonNull);
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result. We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();

  // Emit OpenMP specific initialization of the device functions.
  if (getLangOpts().OpenMP && CurCodeDecl)
    CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);

  // Handle emitting HLSL entry functions.
  if (D && D->hasAttr<HLSLShaderAttr>())
    CGM.getHLSLRuntime().emitEntryFunction(FD, Fn);

  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (isa_and_nonnull<CXXMethodDecl>(D) &&
      cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If the lambda captures the object referred to by '*this' - either by
        // value or by reference, make sure CXXThisValue points to the correct
        // object.

        // Get the lvalue for the field (which is a copy of the enclosing
        // object or contains the address of the enclosing object).
        LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
        if (!LambdaThisCaptureField->getType()->isPointerType()) {
          // If the enclosing object was captured by value, just use its
          // address.
          CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer();
        } else {
          // Load the lvalue pointed to by the field, since '*this' was
          // captured by reference.
          CXXThisValue =
              EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
        }
      }
      for (auto *FD : MD->getParent()->fields()) {
        if (FD->hasCapturedVLAType()) {
          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
                                           SourceLocation()).getScalarVal();
          auto VAT = FD->getCapturedVLAType();
          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
        }
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'? The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }

    // Check the 'this' pointer once per function, if it's available.
    if (CXXABIThisValue) {
      SanitizerSet SkippedChecks;
      SkippedChecks.set(SanitizerKind::ObjectSize, true);
      QualType ThisTy = MD->getThisType();

      // If this is the call operator of a lambda with no capture-default, it
      // may have a static invoker function, which may call this operator with
      // a null 'this' pointer.
      if (isLambdaCallOperator(MD) &&
          MD->getParent()->getLambdaCaptureDefault() == LCD_None)
        SkippedChecks.set(SanitizerKind::Null, true);

      EmitTypeCheck(
          isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall : TCK_MemberCall,
          Loc, CXXABIThisValue, ThisTy, CXXABIThisAlignment, SkippedChecks);
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size, but only if the function is not naked. Naked functions
  // have no prolog to run this evaluation.
  if (!FD || !FD->hasAttr<NakedAttr>()) {
    for (const VarDecl *VD : Args) {
      // Dig out the type as written from ParmVarDecls; it's unclear whether
      // the standard (C99 6.9.1p10) requires this, but we're following the
      // precedent set by gcc.
      QualType Ty;
      if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
        Ty = PVD->getOriginalType();
      else
        Ty = VD->getType();

      if (Ty->isVariablyModifiedType())
        EmitVariablyModifiedType(Ty);
    }
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
  // TODO: Do we need to handle this in two places like we do with
  // target-features/target-cpu?
  if (CurFuncDecl)
    if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
      LargestVectorWidth = VecWidth->getVectorWidth();
}

void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
  incrementProfileCounter(Body);
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(Body);

  // This is checked after emitting the function body so we know if there
  // are any permitted infinite loops.
  if (checkIfFunctionMustProgress())
    CurFn->addFnAttr(llvm::Attribute::MustProgress);
}

/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
                                               const Stmt *S) {
  llvm::BasicBlock *SkipCountBB = nullptr;
  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  uint64_t CurrentCount = getCurrentProfileCount();
  incrementProfileCounter(S);
  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it. We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->isInterposable()) return;

  for (llvm::BasicBlock &BB : *F)
    for (llvm::Instruction &I : BB)
      if (I.mayThrow())
        return;

  F->setDoesNotThrow();
}
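
// Note: setDoesNotThrow() is shorthand for adding the 'nounwind' attribute.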

QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
                                               FunctionArgList &Args) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
  QualType ResTy = FD->getReturnType();

  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
  if (MD && MD->isInstance()) {
    if (CGM.getCXXABI().HasThisReturn(GD))
      ResTy = MD->getThisType();
    else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
      ResTy = CGM.getContext().VoidPtrTy;
    CGM.getCXXABI().buildThisParam(*this, Args);
  }

  // The base version of an inheriting constructor whose constructed base is a
  // virtual base is not passed any arguments (because it doesn't actually call
  // the inherited constructor).
  bool PassedParams = true;
  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    if (auto Inherited = CD->getInheritedConstructor())
      PassedParams =
          getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());

  if (PassedParams) {
    for (auto *Param : FD->parameters()) {
      Args.push_back(Param);
      if (!Param->hasAttr<PassObjectSizeAttr>())
        continue;

      auto *Implicit = ImplicitParamDecl::Create(
          getContext(), Param->getDeclContext(), Param->getLocation(),
          /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
      SizeArguments[Param] = Implicit;
      Args.push_back(Implicit);
    }
  }

  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);

  return ResTy;
}
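
// Note: for each parameter carrying pass_object_size, an implicit size_t
// parameter was appended above; it receives the object size computed at each
// call site, and SizeArguments lets the prologue associate the two.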
1340 void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1341 const CGFunctionInfo &FnInfo) {
1342 assert(Fn && "generating code for null Function");
1343 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1344 CurGD = GD;
1346 FunctionArgList Args;
1347 QualType ResTy = BuildFunctionArgList(GD, Args);
1349 if (FD->isInlineBuiltinDeclaration()) {
1350 // When generating code for a builtin with an inline declaration, use a
1351 // mangled name to hold the actual body, while keeping an external
1352 // definition in case the function pointer is referenced somewhere.
1353 std::string FDInlineName = (Fn->getName() + ".inline").str();
1354 llvm::Module *M = Fn->getParent();
1355 llvm::Function *Clone = M->getFunction(FDInlineName);
1356 if (!Clone) {
1357 Clone = llvm::Function::Create(Fn->getFunctionType(),
1358 llvm::GlobalValue::InternalLinkage,
1359 Fn->getAddressSpace(), FDInlineName, M);
1360 Clone->addFnAttr(llvm::Attribute::AlwaysInline);
1362 Fn->setLinkage(llvm::GlobalValue::ExternalLinkage);
1363 Fn = Clone;
1364 } else {
1365 // Detect the unusual situation where an inline version is shadowed by a
1366 // non-inline version. In that case we should pick the external one
1367 // everywhere. That's GCC behavior too. Unfortunately, I cannot find a way
1368 // to detect that situation before we reach codegen, so do some late
1369 // replacement.
1370 for (const FunctionDecl *PD = FD->getPreviousDecl(); PD;
1371 PD = PD->getPreviousDecl()) {
1372 if (LLVM_UNLIKELY(PD->isInlineBuiltinDeclaration())) {
1373 std::string FDInlineName = (Fn->getName() + ".inline").str();
1374 llvm::Module *M = Fn->getParent();
1375 if (llvm::Function *Clone = M->getFunction(FDInlineName)) {
1376 Clone->replaceAllUsesWith(Fn);
1377 Clone->eraseFromParent();
1379 break;
1384 // Check if we should generate debug info for this function.
1385 if (FD->hasAttr<NoDebugAttr>()) {
1386 // Clear non-distinct debug info that was possibly attached to the function
1387 // due to an earlier declaration without the nodebug attribute
1388 Fn->setSubprogram(nullptr);
1389 // Disable debug info indefinitely for this function
1390 DebugInfo = nullptr;
1393 // The function might not have a body if we're generating thunks for a
1394 // function declaration.
1395 SourceRange BodyRange;
1396 if (Stmt *Body = FD->getBody())
1397 BodyRange = Body->getSourceRange();
1398 else
1399 BodyRange = FD->getLocation();
1400 CurEHLocation = BodyRange.getEnd();
1402 // Use the location of the start of the function to determine where
1403 // the function definition is located. By default use the location
1404 // of the declaration as the location for the subprogram. A function
1405 // may lack a declaration in the source code if it is created by code
1406 // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
1407 SourceLocation Loc = FD->getLocation();
1409 // If this is a function specialization then use the pattern body
1410 // as the location for the function.
1411 if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
1412 if (SpecDecl->hasBody(SpecDecl))
1413 Loc = SpecDecl->getLocation();
1415 Stmt *Body = FD->getBody();
1417 if (Body) {
1418 // Coroutines always emit lifetime markers.
1419 if (isa<CoroutineBodyStmt>(Body))
1420 ShouldEmitLifetimeMarkers = true;
1422 // Initialize helper which will detect jumps which can cause invalid
1423 // lifetime markers.
1424 if (ShouldEmitLifetimeMarkers)
1425 Bypasses.Init(Body);
1428 // Emit the standard function prologue.
1429 StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
1431 // Save parameters for coroutine function.
1432 if (Body && isa_and_nonnull<CoroutineBodyStmt>(Body))
1433 llvm::append_range(FnArgs, FD->parameters());
1435 // Generate the body of the function.
1436 PGO.assignRegionCounters(GD, CurFn);
1437 if (isa<CXXDestructorDecl>(FD))
1438 EmitDestructorBody(Args);
1439 else if (isa<CXXConstructorDecl>(FD))
1440 EmitConstructorBody(Args);
1441 else if (getLangOpts().CUDA &&
1442 !getLangOpts().CUDAIsDevice &&
1443 FD->hasAttr<CUDAGlobalAttr>())
1444 CGM.getCUDARuntime().emitDeviceStub(*this, Args);
1445 else if (isa<CXXMethodDecl>(FD) &&
1446 cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
1447 // The lambda static invoker function is special, because it forwards or
1448 // clones the body of the function call operator (but is actually static).
1449 EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
1450 } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
1451 (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
1452 cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
1453 // Implicit copy-assignment gets the same special treatment as implicit
1454 // copy-constructors.
1455 emitImplicitAssignmentOperatorBody(Args);
1456 } else if (Body) {
1457 EmitFunctionBody(Body);
1458 } else
1459 llvm_unreachable("no definition for emitted function");
1461 // C++11 [stmt.return]p2:
1462 // Flowing off the end of a function [...] results in undefined behavior in
1463 // a value-returning function.
1464 // C11 6.9.1p12:
1465 // If the '}' that terminates a function is reached, and the value of the
1466 // function call is used by the caller, the behavior is undefined.
1467 if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
1468 !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
1469 bool ShouldEmitUnreachable =
1470 CGM.getCodeGenOpts().StrictReturn ||
1471 !CGM.MayDropFunctionReturn(FD->getASTContext(), FD->getReturnType());
1472 if (SanOpts.has(SanitizerKind::Return)) {
1473 SanitizerScope SanScope(this);
1474 llvm::Value *IsFalse = Builder.getFalse();
1475 EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
1476 SanitizerHandler::MissingReturn,
1477 EmitCheckSourceLocation(FD->getLocation()), std::nullopt);
1478 } else if (ShouldEmitUnreachable) {
1479 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1480 EmitTrapCall(llvm::Intrinsic::trap);
1482 if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
1483 Builder.CreateUnreachable();
1484 Builder.ClearInsertionPoint();
1488 // Emit the standard function epilogue.
1489 FinishFunction(BodyRange.getEnd());
1491 // If we haven't marked the function nothrow through other means, do
1492 // a quick pass now to see if we can.
1493 if (!CurFn->doesNotThrow())
1494 TryMarkNoThrow(CurFn);
1497 /// ContainsLabel - Return true if the statement contains a label in it. If
1498 /// a statement is not executed normally and contains no label, we can
1499 /// simply remove it as dead code.
1500 bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1501 // Null statement, not a label!
1502 if (!S) return false;
1504 // If this is a label, we have to emit the code; consider something like:
1505 // if (0) { ... foo: bar(); } goto foo;
1507 // TODO: If anyone cared, we could track __label__'s, since we know that you
1508 // can't jump to one from outside their declared region.
1509 if (isa<LabelStmt>(S))
1510 return true;
1512 // If this is a case/default statement, and we haven't seen a switch, we have
1513 // to emit the code.
1514 if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
1515 return true;
1517 // If this is a switch statement, we want to ignore cases below it.
1518 if (isa<SwitchStmt>(S))
1519 IgnoreCaseStmts = true;
1521 // Scan subexpressions for verboten labels.
1522 for (const Stmt *SubStmt : S->children())
1523 if (ContainsLabel(SubStmt, IgnoreCaseStmts))
1524 return true;
1526 return false;
1529 /// containsBreak - Return true if the statement contains a break that could
1530 /// escape it. A break inside a nested switch or loop, which defines its own
1531 /// break scope, does not count.
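// For example (illustrative):
//
//   while (c) { break; }  // false: the while owns its break scope
//   if (c) break;         // true: the break escapes the if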
1532 bool CodeGenFunction::containsBreak(const Stmt *S) {
1533 // Null statement, no break.
1534 if (!S) return false;
1536 // If this is a switch or loop that defines its own break scope, then we can
1537 // include it and anything inside of it.
1538 if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
1539 isa<ForStmt>(S))
1540 return false;
1542 if (isa<BreakStmt>(S))
1543 return true;
1545 // Scan subexpressions for verboten breaks.
1546 for (const Stmt *SubStmt : S->children())
1547 if (containsBreak(SubStmt))
1548 return true;
1550 return false;
1553 bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
1554 if (!S) return false;
1556 // Some statement kinds add a scope and thus never add a decl to the current
1557 // scope. Note, this list is longer than the list of statements that might
1558 // have an unscoped decl nested within them, but this way is conservatively
1559 // correct even if more statement kinds are added.
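// For example (illustrative): a bare 'int x = 0;' DeclStmt might add
// 'x' to the current scope, whereas 'if (c) int x = 0;' cannot, since
// the if statement introduces its own scope.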
1560 if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
1561 isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
1562 isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
1563 isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
1564 return false;
1566 if (isa<DeclStmt>(S))
1567 return true;
1569 for (const Stmt *SubStmt : S->children())
1570 if (mightAddDeclToScope(SubStmt))
1571 return true;
1573 return false;
1576 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1577 /// to a constant, or if it does but contains a label, return false. If it
1578 /// constant folds, return true and set the boolean result in ResultBool.
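// For example (illustrative): for the condition of 'if (2 > 1)' this
// returns true and sets ResultBool to true; for 'if (x > 1)' with a
// non-constant x it returns false and leaves ResultBool untouched.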
1579 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1580 bool &ResultBool,
1581 bool AllowLabels) {
1582 llvm::APSInt ResultInt;
1583 if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
1584 return false;
1586 ResultBool = ResultInt.getBoolValue();
1587 return true;
1590 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1591 /// to a constant, or if it does but contains a label, return false. If it
1592 /// constant folds, return true and set the folded value in ResultInt.
1593 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1594 llvm::APSInt &ResultInt,
1595 bool AllowLabels) {
1596 // FIXME: Rename and handle conversion of other evaluatable things
1597 // to bool.
1598 Expr::EvalResult Result;
1599 if (!Cond->EvaluateAsInt(Result, getContext()))
1600 return false; // Not foldable, not integer or not fully evaluatable.
1602 llvm::APSInt Int = Result.Val.getInt();
1603 if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
1604 return false; // Contains a label.
1606 ResultInt = Int;
1607 return true;
1610 /// Determine whether the given condition is an instrumentable condition
1611 /// (i.e. no "&&" or "||").
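// For example (illustrative): 'x' and '!x' are instrumentable
// conditions, while 'a && b' and '!(a || b)' are not, because logical
// operators are instrumented through their own counters instead.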
1612 bool CodeGenFunction::isInstrumentedCondition(const Expr *C) {
1613 // Bypass simplistic logical-NOT operator before determining whether the
1614 // condition contains any other logical operator.
1615 if (const UnaryOperator *UnOp = dyn_cast<UnaryOperator>(C->IgnoreParens()))
1616 if (UnOp->getOpcode() == UO_LNot)
1617 C = UnOp->getSubExpr();
1619 const BinaryOperator *BOp = dyn_cast<BinaryOperator>(C->IgnoreParens());
1620 return (!BOp || !BOp->isLogicalOp());
1623 /// EmitBranchToCounterBlock - Emit a conditional branch to a new block that
1624 /// increments a profile counter based on the semantics of the given logical
1625 /// operator opcode. This is used to instrument branch condition coverage for
1626 /// logical operators.
1627 void CodeGenFunction::EmitBranchToCounterBlock(
1628 const Expr *Cond, BinaryOperator::Opcode LOp, llvm::BasicBlock *TrueBlock,
1629 llvm::BasicBlock *FalseBlock, uint64_t TrueCount /* = 0 */,
1630 Stmt::Likelihood LH /* =None */, const Expr *CntrIdx /* = nullptr */) {
1631 // If not instrumenting, just emit a branch.
1632 bool InstrumentRegions = CGM.getCodeGenOpts().hasProfileClangInstr();
1633 if (!InstrumentRegions || !isInstrumentedCondition(Cond))
1634 return EmitBranchOnBoolExpr(Cond, TrueBlock, FalseBlock, TrueCount, LH);
1636 llvm::BasicBlock *ThenBlock = nullptr;
1637 llvm::BasicBlock *ElseBlock = nullptr;
1638 llvm::BasicBlock *NextBlock = nullptr;
1640 // Create the block we'll use to increment the appropriate counter.
1641 llvm::BasicBlock *CounterIncrBlock = createBasicBlock("lop.rhscnt");
1643 // Set block pointers according to Logical-AND (BO_LAnd) semantics. This
1644 // means we need to evaluate the condition and increment the counter on TRUE:
1646 // if (Cond)
1647 // goto CounterIncrBlock;
1648 // else
1649 // goto FalseBlock;
1651 // CounterIncrBlock:
1652 // Counter++;
1653 // goto TrueBlock;
1655 if (LOp == BO_LAnd) {
1656 ThenBlock = CounterIncrBlock;
1657 ElseBlock = FalseBlock;
1658 NextBlock = TrueBlock;
1661 // Set block pointers according to Logical-OR (BO_LOr) semantics. This means
1662 // we need to evaluate the condition and increment the counter on FALSE:
1664 // if (Cond)
1665 // goto TrueBlock;
1666 // else
1667 // goto CounterIncrBlock;
1669 // CounterIncrBlock:
1670 // Counter++;
1671 // goto FalseBlock;
1673 else if (LOp == BO_LOr) {
1674 ThenBlock = TrueBlock;
1675 ElseBlock = CounterIncrBlock;
1676 NextBlock = FalseBlock;
1677 } else {
1678 llvm_unreachable("Expected Opcode must be that of a Logical Operator");
1681 // Emit Branch based on condition.
1682 EmitBranchOnBoolExpr(Cond, ThenBlock, ElseBlock, TrueCount, LH);
1684 // Emit the block containing the counter increment(s).
1685 EmitBlock(CounterIncrBlock);
1687 // Increment corresponding counter; if index not provided, use Cond as index.
1688 incrementProfileCounter(CntrIdx ? CntrIdx : Cond);
1690 // Go to the next block.
1691 EmitBranch(NextBlock);
1694 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1695 /// statement) to the specified blocks. Depending on the condition, this
1696 /// might try to simplify the codegen of the branch.
1697 /// \param LH The value of the likelihood attribute on the True branch.
1698 void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
1699 llvm::BasicBlock *TrueBlock,
1700 llvm::BasicBlock *FalseBlock,
1701 uint64_t TrueCount,
1702 Stmt::Likelihood LH) {
1703 Cond = Cond->IgnoreParens();
1705 if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
1707 // Handle X && Y in a condition.
1708 if (CondBOp->getOpcode() == BO_LAnd) {
1709 // If we have "1 && X", simplify the code. "0 && X" would have constant
1710 // folded if the case was simple enough.
1711 bool ConstantBool = false;
1712 if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1713 ConstantBool) {
1714 // br(1 && X) -> br(X).
1715 incrementProfileCounter(CondBOp);
1716 return EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1717 FalseBlock, TrueCount, LH);
1720 // If we have "X && 1", simplify the code to use an uncond branch.
1721 // "X && 0" would have been constant folded to 0.
1722 if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1723 ConstantBool) {
1724 // br(X && 1) -> br(X).
1725 return EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LAnd, TrueBlock,
1726 FalseBlock, TrueCount, LH, CondBOp);
1729 // Emit the LHS as a conditional. If the LHS conditional is false, we
1730 // want to jump to the FalseBlock.
1731 llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1732 // The counter tells us how often we evaluate RHS, and all of TrueCount
1733 // can be propagated to that branch.
1734 uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1736 ConditionalEvaluation eval(*this);
1738 ApplyDebugLocation DL(*this, Cond);
1739 // Propagate the likelihood attribute like __builtin_expect
1740 // __builtin_expect(X && Y, 1) -> X and Y are likely
1741 // __builtin_expect(X && Y, 0) -> only Y is unlikely
1742 EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount,
1743 LH == Stmt::LH_Unlikely ? Stmt::LH_None : LH);
1744 EmitBlock(LHSTrue);
1747 incrementProfileCounter(CondBOp);
1748 setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1750 // Any temporaries created here are conditional.
1751 eval.begin(*this);
1752 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LAnd, TrueBlock,
1753 FalseBlock, TrueCount, LH);
1754 eval.end(*this);
1756 return;
1759 if (CondBOp->getOpcode() == BO_LOr) {
1760 // If we have "0 || X", simplify the code. "1 || X" would have constant
1761 // folded if the case was simple enough.
1762 bool ConstantBool = false;
1763 if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1764 !ConstantBool) {
1765 // br(0 || X) -> br(X).
1766 incrementProfileCounter(CondBOp);
1767 return EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock,
1768 FalseBlock, TrueCount, LH);
1771 // If we have "X || 0", simplify the code to use an uncond branch.
1772 // "X || 1" would have been constant folded to 1.
1773 if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1774 !ConstantBool) {
1775 // br(X || 0) -> br(X).
1776 return EmitBranchToCounterBlock(CondBOp->getLHS(), BO_LOr, TrueBlock,
1777 FalseBlock, TrueCount, LH, CondBOp);
1780 // Emit the LHS as a conditional. If the LHS conditional is true, we
1781 // want to jump to the TrueBlock.
1782 llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
1783 // We have the count for entry to the RHS and for the whole expression
1784 // being true, so we can divvy up the true count between the short
1785 // circuit and the RHS.
1786 uint64_t LHSCount =
1787 getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1788 uint64_t RHSCount = TrueCount - LHSCount;
1790 ConditionalEvaluation eval(*this);
1792 // Propagate the likelihood attribute like __builtin_expect
1793 // __builtin_expect(X || Y, 1) -> only Y is likely
1794 // __builtin_expect(X || Y, 0) -> both X and Y are unlikely
1795 ApplyDebugLocation DL(*this, Cond);
1796 EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount,
1797 LH == Stmt::LH_Likely ? Stmt::LH_None : LH);
1798 EmitBlock(LHSFalse);
1801 incrementProfileCounter(CondBOp);
1802 setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1804 // Any temporaries created here are conditional.
1805 eval.begin(*this);
1806 EmitBranchToCounterBlock(CondBOp->getRHS(), BO_LOr, TrueBlock, FalseBlock,
1807 RHSCount, LH);
1809 eval.end(*this);
1811 return;
1815 if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
1816 // br(!x, t, f) -> br(x, f, t)
1817 if (CondUOp->getOpcode() == UO_LNot) {
1818 // Negate the count.
1819 uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
1820 // The values of the enum are chosen to make this negation possible.
1821 LH = static_cast<Stmt::Likelihood>(-LH);
1822 // Negate the condition and swap the destination blocks.
1823 return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
1824 FalseCount, LH);
1828 if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
1829 // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
1830 llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
1831 llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
1833 // The ConditionalOperator itself has no likelihood information for its
1834 // true and false branches. This matches the behavior of __builtin_expect.
1835 ConditionalEvaluation cond(*this);
1836 EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
1837 getProfileCount(CondOp), Stmt::LH_None);
1839 // When computing PGO branch weights, we only know the overall count for
1840 // the true block. This code is essentially doing tail duplication of the
1841 // naive code-gen, introducing new edges for which counts are not
1842 // available. Divide the counts proportionally between the LHS and RHS of
1843 // the conditional operator.
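// Worked example (illustrative): if the enclosing count is 100, the
// condition was true 60 times (getProfileCount(CondOp) == 60), and
// TrueCount == 30, then LHSRatio == 0.6 and LHSScaledTrueCount == 18,
// leaving 12 for the RHS arm.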
1844 uint64_t LHSScaledTrueCount = 0;
1845 if (TrueCount) {
1846 double LHSRatio =
1847 getProfileCount(CondOp) / (double)getCurrentProfileCount();
1848 LHSScaledTrueCount = TrueCount * LHSRatio;
1851 cond.begin(*this);
1852 EmitBlock(LHSBlock);
1853 incrementProfileCounter(CondOp);
1855 ApplyDebugLocation DL(*this, Cond);
1856 EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
1857 LHSScaledTrueCount, LH);
1859 cond.end(*this);
1861 cond.begin(*this);
1862 EmitBlock(RHSBlock);
1863 EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
1864 TrueCount - LHSScaledTrueCount, LH);
1865 cond.end(*this);
1867 return;
1870 if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
1871 // Conditional operator handling can give us a throw expression as a
1872 // condition for a case like:
1873 // br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
1874 // Fold this to:
1875 // br(c, throw x, br(y, t, f))
1876 EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
1877 return;
1880 // Emit the code with the fully general case.
1881 llvm::Value *CondV;
1883 ApplyDebugLocation DL(*this, Cond);
1884 CondV = EvaluateExprAsBool(Cond);
1887 llvm::MDNode *Weights = nullptr;
1888 llvm::MDNode *Unpredictable = nullptr;
1890 // If the branch has a condition wrapped by __builtin_unpredictable,
1891 // create metadata that specifies that the branch is unpredictable.
1892 // Don't bother if not optimizing because that metadata would not be used.
1893 auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
1894 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1895 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1896 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1897 llvm::MDBuilder MDHelper(getLLVMContext());
1898 Unpredictable = MDHelper.createUnpredictable();
1902 // If there is likelihood information for the condition, lower it.
1903 // Note that when not optimizing this won't emit anything.
1904 llvm::Value *NewCondV = emitCondLikelihoodViaExpectIntrinsic(CondV, LH);
1905 if (CondV != NewCondV)
1906 CondV = NewCondV;
1907 else {
1908 // Otherwise, lower profile counts. Note that we do this even at -O0.
1909 uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
1910 Weights = createProfileWeights(TrueCount, CurrentCount - TrueCount);
1913 Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
1916 /// ErrorUnsupported - Print out an error that codegen doesn't support the
1917 /// specified stmt yet.
1918 void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
1919 CGM.ErrorUnsupported(S, Type);
1922 /// emitNonZeroVLAInit - Emit the "zero" initialization of a
1923 /// variable-length array whose elements have a non-zero bit-pattern.
1925 /// \param baseType the inner-most element type of the array
1926 /// \param src - a char* pointing to the bit-pattern for a single
1927 /// base element of the array
1928 /// \param sizeInChars - the total size of the VLA, in chars
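/// A minimal sketch of the emitted loop, in C terms (names below mirror
/// the locals in this function and are illustrative only):
///
///   char *cur = dest, *end = dest + sizeInChars;
///   do {
///     memcpy(cur, src, baseSizeInChars);
///     cur += baseSizeInChars;
///   } while (cur != end);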
1929 static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
1930 Address dest, Address src,
1931 llvm::Value *sizeInChars) {
1932 CGBuilderTy &Builder = CGF.Builder;
1934 CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
1935 llvm::Value *baseSizeInChars
1936 = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
1938 Address begin =
1939 Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
1940 llvm::Value *end = Builder.CreateInBoundsGEP(
1941 begin.getElementType(), begin.getPointer(), sizeInChars, "vla.end");
1943 llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
1944 llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
1945 llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
1947 // Make a loop over the VLA. C99 guarantees that the VLA element
1948 // count must be nonzero.
1949 CGF.EmitBlock(loopBB);
1951 llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
1952 cur->addIncoming(begin.getPointer(), originBB);
1954 CharUnits curAlign =
1955 dest.getAlignment().alignmentOfArrayElement(baseSize);
1957 // memcpy the individual element bit-pattern.
1958 Builder.CreateMemCpy(Address(cur, CGF.Int8Ty, curAlign), src, baseSizeInChars,
1959 /*volatile*/ false);
1961 // Go to the next element.
1962 llvm::Value *next =
1963 Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
1965 // Leave if that's the end of the VLA.
1966 llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
1967 Builder.CreateCondBr(done, contBB, loopBB);
1968 cur->addIncoming(next, loopBB);
1970 CGF.EmitBlock(contBB);
1973 void
1974 CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
1975 // Ignore empty classes in C++.
1976 if (getLangOpts().CPlusPlus) {
1977 if (const RecordType *RT = Ty->getAs<RecordType>()) {
1978 if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
1979 return;
1983 // Cast the dest ptr to the appropriate i8 pointer type.
1984 if (DestPtr.getElementType() != Int8Ty)
1985 DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
1987 // Get size and alignment info for this aggregate.
1988 CharUnits size = getContext().getTypeSizeInChars(Ty);
1990 llvm::Value *SizeVal;
1991 const VariableArrayType *vla;
1993 // Don't bother emitting a zero-byte memset.
1994 if (size.isZero()) {
1995 // But note that getTypeInfo returns 0 for a VLA.
1996 if (const VariableArrayType *vlaType =
1997 dyn_cast_or_null<VariableArrayType>(
1998 getContext().getAsArrayType(Ty))) {
1999 auto VlaSize = getVLASize(vlaType);
2000 SizeVal = VlaSize.NumElts;
2001 CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
2002 if (!eltSize.isOne())
2003 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
2004 vla = vlaType;
2005 } else {
2006 return;
2008 } else {
2009 SizeVal = CGM.getSize(size);
2010 vla = nullptr;
2013 // If the type contains a pointer to data member we can't memset it to zero.
2014 // Instead, create a null constant and copy it to the destination.
2015 // TODO: there are other patterns besides zero that we can usefully memset,
2016 // like -1, which happens to be the pattern used by member-pointers.
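// For example (illustrative): in the Itanium C++ ABI a null pointer to
// data member such as 'int S::*' is represented as -1, so zeroing the
// storage would instead denote the member at offset 0.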
2017 if (!CGM.getTypes().isZeroInitializable(Ty)) {
2018 // For a VLA, emit a single element, then splat that over the VLA.
2019 if (vla) Ty = getContext().getBaseElementType(vla);
2021 llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
2023 llvm::GlobalVariable *NullVariable =
2024 new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
2025 /*isConstant=*/true,
2026 llvm::GlobalVariable::PrivateLinkage,
2027 NullConstant, Twine());
2028 CharUnits NullAlign = DestPtr.getAlignment();
2029 NullVariable->setAlignment(NullAlign.getAsAlign());
2030 Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
2031 Builder.getInt8Ty(), NullAlign);
2033 if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
2035 // Get and call the appropriate llvm.memcpy overload.
2036 Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
2037 return;
2040 // Otherwise, just memset the whole thing to zero. This is legal
2041 // because in LLVM, all default initializers (other than the ones we just
2042 // handled above) are guaranteed to have a bit pattern of all zeros.
2043 Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
2046 llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
2047 // Make sure that there is a block for the indirect goto.
2048 if (!IndirectBranch)
2049 GetIndirectGotoBlock();
2051 llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
2053 // Make sure the indirect branch includes all of the address-taken blocks.
2054 IndirectBranch->addDestination(BB);
2055 return llvm::BlockAddress::get(CurFn, BB);
2058 llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
2059 // If we already made the indirect branch for indirect goto, return its block.
2060 if (IndirectBranch) return IndirectBranch->getParent();
2062 CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
2064 // Create the PHI node that indirect gotos will add entries to.
2065 llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
2066 "indirect.goto.dest");
2068 // Create the indirect branch instruction.
2069 IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
2070 return IndirectBranch->getParent();
2073 /// Computes the length of an array in elements, as well as the base
2074 /// element type and a properly-typed first element pointer.
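/// For example (illustrative): for 'int a[2][3]' this returns 6, sets
/// baseType to 'int', and adjusts addr to point at the first int; for
/// 'int v[n][4]' it returns 'n * 4' with the VLA factor loaded at runtime.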
2075 llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
2076 QualType &baseType,
2077 Address &addr) {
2078 const ArrayType *arrayType = origArrayType;
2080 // If it's a VLA, we have to load the stored size. Note that
2081 // this is the number of elements in the VLA, not its size in bytes.
2082 llvm::Value *numVLAElements = nullptr;
2083 if (isa<VariableArrayType>(arrayType)) {
2084 numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;
2086 // Walk into all VLAs. This doesn't require changes to addr,
2087 // which has type T* where T is the first non-VLA element type.
2088 do {
2089 QualType elementType = arrayType->getElementType();
2090 arrayType = getContext().getAsArrayType(elementType);
2092 // If we only have VLA components, 'addr' requires no adjustment.
2093 if (!arrayType) {
2094 baseType = elementType;
2095 return numVLAElements;
2097 } while (isa<VariableArrayType>(arrayType));
2099 // We get out here only if we find a constant array type
2100 // inside the VLA.
2103 // We have some number of constant-length arrays, so addr should
2104 // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
2105 // down to the first element of addr.
2106 SmallVector<llvm::Value*, 8> gepIndices;
2108 // GEP down to the array type.
2109 llvm::ConstantInt *zero = Builder.getInt32(0);
2110 gepIndices.push_back(zero);
2112 uint64_t countFromCLAs = 1;
2113 QualType eltType;
2115 llvm::ArrayType *llvmArrayType =
2116 dyn_cast<llvm::ArrayType>(addr.getElementType());
2117 while (llvmArrayType) {
2118 assert(isa<ConstantArrayType>(arrayType));
2119 assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
2120 == llvmArrayType->getNumElements());
2122 gepIndices.push_back(zero);
2123 countFromCLAs *= llvmArrayType->getNumElements();
2124 eltType = arrayType->getElementType();
2126 llvmArrayType =
2127 dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
2128 arrayType = getContext().getAsArrayType(arrayType->getElementType());
2129 assert((!llvmArrayType || arrayType) &&
2130 "LLVM and Clang types are out of sync");
2133 if (arrayType) {
2134 // From this point onwards, the Clang array type has been emitted
2135 // as some other type (probably a packed struct). Compute the array
2136 // size, and just emit the 'begin' expression as a bitcast.
2137 while (arrayType) {
2138 countFromCLAs *=
2139 cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
2140 eltType = arrayType->getElementType();
2141 arrayType = getContext().getAsArrayType(eltType);
2144 llvm::Type *baseType = ConvertType(eltType);
2145 addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
2146 } else {
2147 // Create the actual GEP.
2148 addr = Address(Builder.CreateInBoundsGEP(
2149 addr.getElementType(), addr.getPointer(), gepIndices, "array.begin"),
2150 ConvertTypeForMem(eltType),
2151 addr.getAlignment());
2154 baseType = eltType;
2156 llvm::Value *numElements
2157 = llvm::ConstantInt::get(SizeTy, countFromCLAs);
2159 // If we had any VLA dimensions, factor them in.
2160 if (numVLAElements)
2161 numElements = Builder.CreateNUWMul(numVLAElements, numElements);
2163 return numElements;
2166 CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
2167 const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
2168 assert(vla && "type was not a variable array type!");
2169 return getVLASize(vla);
2172 CodeGenFunction::VlaSizePair
2173 CodeGenFunction::getVLASize(const VariableArrayType *type) {
2174 // The number of elements so far; always size_t.
2175 llvm::Value *numElements = nullptr;
2177 QualType elementType;
2178 do {
2179 elementType = type->getElementType();
2180 llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
2181 assert(vlaSize && "no size for VLA!");
2182 assert(vlaSize->getType() == SizeTy);
2184 if (!numElements) {
2185 numElements = vlaSize;
2186 } else {
2187 // It's undefined behavior if this wraps around, so mark it that way.
2188 // FIXME: Teach -fsanitize=undefined to trap this.
2189 numElements = Builder.CreateNUWMul(numElements, vlaSize);
2191 } while ((type = getContext().getAsVariableArrayType(elementType)));
2193 return { numElements, elementType };
2196 CodeGenFunction::VlaSizePair
2197 CodeGenFunction::getVLAElements1D(QualType type) {
2198 const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
2199 assert(vla && "type was not a variable array type!");
2200 return getVLAElements1D(vla);
2203 CodeGenFunction::VlaSizePair
2204 CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
2205 llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
2206 assert(VlaSize && "no size for VLA!");
2207 assert(VlaSize->getType() == SizeTy);
2208 return { VlaSize, Vla->getElementType() };
2211 void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
2212 assert(type->isVariablyModifiedType() &&
2213 "Must pass a variably modified type to EmitVariablyModifiedType!");
2215 EnsureInsertPoint();
2217 // We're going to walk down into the type and look for VLA
2218 // expressions.
2219 do {
2220 assert(type->isVariablyModifiedType());
2222 const Type *ty = type.getTypePtr();
2223 switch (ty->getTypeClass()) {
2225 #define TYPE(Class, Base)
2226 #define ABSTRACT_TYPE(Class, Base)
2227 #define NON_CANONICAL_TYPE(Class, Base)
2228 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
2229 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
2230 #include "clang/AST/TypeNodes.inc"
2231 llvm_unreachable("unexpected dependent type!");
2233 // These types are never variably-modified.
2234 case Type::Builtin:
2235 case Type::Complex:
2236 case Type::Vector:
2237 case Type::ExtVector:
2238 case Type::ConstantMatrix:
2239 case Type::Record:
2240 case Type::Enum:
2241 case Type::Using:
2242 case Type::TemplateSpecialization:
2243 case Type::ObjCTypeParam:
2244 case Type::ObjCObject:
2245 case Type::ObjCInterface:
2246 case Type::ObjCObjectPointer:
2247 case Type::BitInt:
2248 llvm_unreachable("type class is never variably-modified!");
2250 case Type::Elaborated:
2251 type = cast<ElaboratedType>(ty)->getNamedType();
2252 break;
2254 case Type::Adjusted:
2255 type = cast<AdjustedType>(ty)->getAdjustedType();
2256 break;
2258 case Type::Decayed:
2259 type = cast<DecayedType>(ty)->getPointeeType();
2260 break;
2262 case Type::Pointer:
2263 type = cast<PointerType>(ty)->getPointeeType();
2264 break;
2266 case Type::BlockPointer:
2267 type = cast<BlockPointerType>(ty)->getPointeeType();
2268 break;
2270 case Type::LValueReference:
2271 case Type::RValueReference:
2272 type = cast<ReferenceType>(ty)->getPointeeType();
2273 break;
2275 case Type::MemberPointer:
2276 type = cast<MemberPointerType>(ty)->getPointeeType();
2277 break;
2279 case Type::ConstantArray:
2280 case Type::IncompleteArray:
2281 // Losing element qualification here is fine.
2282 type = cast<ArrayType>(ty)->getElementType();
2283 break;
2285 case Type::VariableArray: {
2286 // Losing element qualification here is fine.
2287 const VariableArrayType *vat = cast<VariableArrayType>(ty);
2289 // Unknown size indication requires no size computation.
2290 // Otherwise, evaluate and record it.
2291 if (const Expr *sizeExpr = vat->getSizeExpr()) {
2292 // It's possible that we might have emitted this already,
2293 // e.g. with a typedef and a pointer to it.
2294 llvm::Value *&entry = VLASizeMap[sizeExpr];
2295 if (!entry) {
2296 llvm::Value *size = EmitScalarExpr(sizeExpr);
2298 // C11 6.7.6.2p5:
2299 // If the size is an expression that is not an integer constant
2300 // expression [...] each time it is evaluated it shall have a value
2301 // greater than zero.
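// For example (illustrative): given 'int a[n];' with n == 0 at run
// time, -fsanitize=vla-bound emits a check here that reaches the
// VLABoundNotPositive handler instead of silently allocating.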
2302 if (SanOpts.has(SanitizerKind::VLABound)) {
2303 SanitizerScope SanScope(this);
2304 llvm::Value *Zero = llvm::Constant::getNullValue(size->getType());
2305 clang::QualType SEType = sizeExpr->getType();
2306 llvm::Value *CheckCondition =
2307 SEType->isSignedIntegerType()
2308 ? Builder.CreateICmpSGT(size, Zero)
2309 : Builder.CreateICmpUGT(size, Zero);
2310 llvm::Constant *StaticArgs[] = {
2311 EmitCheckSourceLocation(sizeExpr->getBeginLoc()),
2312 EmitCheckTypeDescriptor(SEType)};
2313 EmitCheck(std::make_pair(CheckCondition, SanitizerKind::VLABound),
2314 SanitizerHandler::VLABoundNotPositive, StaticArgs, size);
2317 // Always zexting here would be wrong if it weren't
2318 // undefined behavior to have a negative bound.
2319 // FIXME: What about when size's type is larger than size_t?
2320 entry = Builder.CreateIntCast(size, SizeTy, /*signed*/ false);
2323 type = vat->getElementType();
2324 break;
2327 case Type::FunctionProto:
2328 case Type::FunctionNoProto:
2329 type = cast<FunctionType>(ty)->getReturnType();
2330 break;
2332 case Type::Paren:
2333 case Type::TypeOf:
2334 case Type::UnaryTransform:
2335 case Type::Attributed:
2336 case Type::BTFTagAttributed:
2337 case Type::SubstTemplateTypeParm:
2338 case Type::MacroQualified:
2339 // Keep walking after single level desugaring.
2340 type = type.getSingleStepDesugaredType(getContext());
2341 break;
2343 case Type::Typedef:
2344 case Type::Decltype:
2345 case Type::Auto:
2346 case Type::DeducedTemplateSpecialization:
2347 // Stop walking: nothing to do.
2348 return;
2350 case Type::TypeOfExpr:
2351 // Stop walking: emit typeof expression.
2352 EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
2353 return;
2355 case Type::Atomic:
2356 type = cast<AtomicType>(ty)->getValueType();
2357 break;
2359 case Type::Pipe:
2360 type = cast<PipeType>(ty)->getElementType();
2361 break;
2363 } while (type->isVariablyModifiedType());
2366 Address CodeGenFunction::EmitVAListRef(const Expr* E) {
2367 if (getContext().getBuiltinVaListType()->isArrayType())
2368 return EmitPointerWithAlignment(E);
2369 return EmitLValue(E).getAddress(*this);
2372 Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
2373 return EmitLValue(E).getAddress(*this);
2376 void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
2377 const APValue &Init) {
2378 assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
2379 if (CGDebugInfo *Dbg = getDebugInfo())
2380 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
2381 Dbg->EmitGlobalVariable(E->getDecl(), Init);
2384 CodeGenFunction::PeepholeProtection
2385 CodeGenFunction::protectFromPeepholes(RValue rvalue) {
2386 // At the moment, the only aggressive peephole we do in IR gen
2387 // is trunc(zext) folding, but if we add more, we can easily
2388 // extend this protection.
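// Sketch of the idea (illustrative): given
//   %z = zext i32 %x to i64
// a later trunc of %z back to i32 could be folded straight to %x,
// bypassing %z entirely; the no-op bitcast created below keeps the
// zext protected until unprotectFromPeepholes() erases it again.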
2390 if (!rvalue.isScalar()) return PeepholeProtection();
2391 llvm::Value *value = rvalue.getScalarVal();
2392 if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
2394 // Just make an extra bitcast.
2395 assert(HaveInsertPoint());
2396 llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2397 Builder.GetInsertBlock());
2399 PeepholeProtection protection;
2400 protection.Inst = inst;
2401 return protection;
2404 void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
2405 if (!protection.Inst) return;
2407 // In theory, we could try to duplicate the peepholes now, but whatever.
2408 protection.Inst->eraseFromParent();
2411 void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2412 QualType Ty, SourceLocation Loc,
2413 SourceLocation AssumptionLoc,
2414 llvm::Value *Alignment,
2415 llvm::Value *OffsetValue) {
2416 if (Alignment->getType() != IntPtrTy)
2417 Alignment =
2418 Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align");
2419 if (OffsetValue && OffsetValue->getType() != IntPtrTy)
2420 OffsetValue =
2421 Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset");
2422 llvm::Value *TheCheck = nullptr;
2423 if (SanOpts.has(SanitizerKind::Alignment)) {
2424 llvm::Value *PtrIntValue =
2425 Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
2427 if (OffsetValue) {
2428 bool IsOffsetZero = false;
2429 if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue))
2430 IsOffsetZero = CI->isZero();
2432 if (!IsOffsetZero)
2433 PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr");
2436 llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0);
2437 llvm::Value *Mask =
2438 Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1));
2439 llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr");
2440 TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond");
2442 llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2443 CGM.getDataLayout(), PtrValue, Alignment, OffsetValue);
2445 if (!SanOpts.has(SanitizerKind::Alignment))
2446 return;
2447 emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2448 OffsetValue, TheCheck, Assumption);
2451 void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2452 const Expr *E,
2453 SourceLocation AssumptionLoc,
2454 llvm::Value *Alignment,
2455 llvm::Value *OffsetValue) {
2456 QualType Ty = E->getType();
2457 SourceLocation Loc = E->getExprLoc();
2459 emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2460 OffsetValue);
2463 llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
2464 llvm::Value *AnnotatedVal,
2465 StringRef AnnotationStr,
2466 SourceLocation Location,
2467 const AnnotateAttr *Attr) {
2468 SmallVector<llvm::Value *, 5> Args = {
2469 AnnotatedVal,
2470 Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr),
2471 ConstGlobalsPtrTy),
2472 Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location),
2473 ConstGlobalsPtrTy),
2474 CGM.EmitAnnotationLineNo(Location),
2476 if (Attr)
2477 Args.push_back(CGM.EmitAnnotationArgs(Attr));
2478 return Builder.CreateCall(AnnotationFn, Args);
2481 void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2482 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2483 // FIXME: We create a new bitcast for every annotation because that's what
2484 // llvm-gcc was doing.
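// For example (illustrative): a declaration such as
//   int x __attribute__((annotate("my_tag")));
// produces one llvm.var.annotation call per annotate attribute below.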
2485 unsigned AS = V->getType()->getPointerAddressSpace();
2486 llvm::Type *I8PtrTy = Builder.getInt8PtrTy(AS);
2487 for (const auto *I : D->specific_attrs<AnnotateAttr>())
2488 EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation,
2489 {I8PtrTy, CGM.ConstGlobalsPtrTy}),
2490 Builder.CreateBitCast(V, I8PtrTy, V->getName()),
2491 I->getAnnotation(), D->getLocation(), I);
2494 Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
2495 Address Addr) {
2496 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2497 llvm::Value *V = Addr.getPointer();
2498 llvm::Type *VTy = V->getType();
2499 auto *PTy = dyn_cast<llvm::PointerType>(VTy);
2500 unsigned AS = PTy ? PTy->getAddressSpace() : 0;
2501 llvm::PointerType *IntrinTy =
2502 llvm::PointerType::get(CGM.getLLVMContext(), AS);
2503 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
2504 {IntrinTy, CGM.ConstGlobalsPtrTy});
2506 for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
2507 // FIXME: Always emit the cast inst so we can differentiate between
2508 // annotation on the first field of a struct and annotation on the struct
2509 // itself.
2510 if (VTy != IntrinTy)
2511 V = Builder.CreateBitCast(V, IntrinTy);
2512 V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation(), I);
2513 V = Builder.CreateBitCast(V, VTy);
2516 return Address(V, Addr.getElementType(), Addr.getAlignment());
2519 CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
2521 CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
2522 : CGF(CGF) {
2523 assert(!CGF->IsSanitizerScope);
2524 CGF->IsSanitizerScope = true;
2527 CodeGenFunction::SanitizerScope::~SanitizerScope() {
2528 CGF->IsSanitizerScope = false;
2531 void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2532 const llvm::Twine &Name,
2533 llvm::BasicBlock *BB,
2534 llvm::BasicBlock::iterator InsertPt) const {
2535 LoopStack.InsertHelper(I);
2536 if (IsSanitizerScope)
2537 I->setNoSanitizeMetadata();
2540 void CGBuilderInserter::InsertHelper(
2541 llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
2542 llvm::BasicBlock::iterator InsertPt) const {
2543 llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
2544 if (CGF)
2545 CGF->InsertHelper(I, Name, BB, InsertPt);
2548 // Emits an error if we don't have a valid set of target features for the
2549 // called function.
2550 void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
2551 const FunctionDecl *TargetDecl) {
2552 return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
2555 // Emits an error if we don't have a valid set of target features for the
2556 // called function.
2557 void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
2558 const FunctionDecl *TargetDecl) {
2559 // Early exit if this is an indirect call.
2560 if (!TargetDecl)
2561 return;
2563 // Get the current enclosing function if it exists. If it doesn't,
2564 // we can't check the target features anyhow.
2565 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2566 if (!FD)
2567 return;
2569 // Grab the required features for the call. For a builtin this is listed
2570 // in the .td file with the default CPU; for an always_inline function it
2571 // is any listed CPU and any listed features.
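// For example (illustrative): calling a builtin that requires "avx"
// from a caller compiled without AVX, or calling an always_inline
// callee marked __attribute__((target("avx2"))) from a plain caller,
// is diagnosed here rather than silently miscompiled.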
2572 unsigned BuiltinID = TargetDecl->getBuiltinID();
2573 std::string MissingFeature;
2574 llvm::StringMap<bool> CallerFeatureMap;
2575 CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
2576 if (BuiltinID) {
2577 StringRef FeatureList(CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
2578 if (!Builtin::evaluateRequiredTargetFeatures(
2579 FeatureList, CallerFeatureMap)) {
2580 CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
2581 << TargetDecl->getDeclName()
2582 << FeatureList;
2584 } else if (!TargetDecl->isMultiVersion() &&
2585 TargetDecl->hasAttr<TargetAttr>()) {
2586 // Get the required features for the callee.
2588 const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
2589 ParsedTargetAttr ParsedAttr =
2590 CGM.getContext().filterFunctionTargetAttrs(TD);
2592 SmallVector<StringRef, 1> ReqFeatures;
2593 llvm::StringMap<bool> CalleeFeatureMap;
2594 CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2596 for (const auto &F : ParsedAttr.Features) {
2597 if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
2598 ReqFeatures.push_back(StringRef(F).substr(1));
2601 for (const auto &F : CalleeFeatureMap) {
2602 // Only positive features are "required".
2603 if (F.getValue())
2604 ReqFeatures.push_back(F.getKey());
2606 if (!llvm::all_of(ReqFeatures, [&](StringRef Feature) {
2607 if (!CallerFeatureMap.lookup(Feature)) {
2608 MissingFeature = Feature.str();
2609 return false;
2611 return true;
2613 CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2614 << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2615 } else if (!FD->isMultiVersion() && FD->hasAttr<TargetAttr>()) {
2616 llvm::StringMap<bool> CalleeFeatureMap;
2617 CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2619 for (const auto &F : CalleeFeatureMap) {
2620 if (F.getValue() && (!CallerFeatureMap.lookup(F.getKey()) ||
2621 !CallerFeatureMap.find(F.getKey())->getValue()))
2622 CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2623 << FD->getDeclName() << TargetDecl->getDeclName() << F.getKey();
2628 void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2629 if (!CGM.getCodeGenOpts().SanitizeStats)
2630 return;
2632 llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2633 IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2634 CGM.getSanStats().create(IRB, SSK);
2637 void CodeGenFunction::EmitKCFIOperandBundle(
2638 const CGCallee &Callee, SmallVectorImpl<llvm::OperandBundleDef> &Bundles) {
2639 const FunctionProtoType *FP =
2640 Callee.getAbstractInfo().getCalleeFunctionProtoType();
2641 if (FP)
2642 Bundles.emplace_back("kcfi", CGM.CreateKCFITypeId(FP->desugar()));
2645 llvm::Value *CodeGenFunction::FormAArch64ResolverCondition(
2646 const MultiVersionResolverOption &RO) {
2647 llvm::SmallVector<StringRef, 8> CondFeatures;
2648 for (const StringRef &Feature : RO.Conditions.Features) {
2649 // Form a condition only for features not yet enabled in the target.
2650 if (!getContext().getTargetInfo().hasFeature(Feature))
2651 CondFeatures.push_back(Feature);
2653 if (!CondFeatures.empty()) {
2654 return EmitAArch64CpuSupports(CondFeatures);
2656 return nullptr;
2659 llvm::Value *CodeGenFunction::FormX86ResolverCondition(
2660 const MultiVersionResolverOption &RO) {
2661 llvm::Value *Condition = nullptr;
2663 if (!RO.Conditions.Architecture.empty())
2664 Condition = EmitX86CpuIs(RO.Conditions.Architecture);
2666 if (!RO.Conditions.Features.empty()) {
2667 llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
2668 Condition =
2669 Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
2671 return Condition;
2674 static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
2675 llvm::Function *Resolver,
2676 CGBuilderTy &Builder,
2677 llvm::Function *FuncToReturn,
2678 bool SupportsIFunc) {
2679 if (SupportsIFunc) {
2680 Builder.CreateRet(FuncToReturn);
2681 return;
2684 llvm::SmallVector<llvm::Value *, 10> Args(
2685 llvm::make_pointer_range(Resolver->args()));
2687 llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
2688 Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
2690 if (Resolver->getReturnType()->isVoidTy())
2691 Builder.CreateRetVoid();
2692 else
2693 Builder.CreateRet(Result);
2696 void CodeGenFunction::EmitMultiVersionResolver(
2697 llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2699 llvm::Triple::ArchType ArchType =
2700 getContext().getTargetInfo().getTriple().getArch();
2702 switch (ArchType) {
2703 case llvm::Triple::x86:
2704 case llvm::Triple::x86_64:
2705 EmitX86MultiVersionResolver(Resolver, Options);
2706 return;
2707 case llvm::Triple::aarch64:
2708 EmitAArch64MultiVersionResolver(Resolver, Options);
2709 return;
2711 default:
2712 assert(false && "Only implemented for x86 and AArch64 targets");
2716 void CodeGenFunction::EmitAArch64MultiVersionResolver(
2717 llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2718 assert(!Options.empty() && "No multiversion resolver options found");
2719 assert(Options.back().Conditions.Features.size() == 0 &&
2720 "Default case must be last");
2721 bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2722 assert(SupportsIFunc &&
2723 "Multiversion resolver requires target IFUNC support");
2724 bool AArch64CpuInitialized = false;
2725 llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2727 for (const MultiVersionResolverOption &RO : Options) {
2728 Builder.SetInsertPoint(CurBlock);
2729 llvm::Value *Condition = FormAArch64ResolverCondition(RO);
2731 // The 'default' or 'all features enabled' case.
2732 if (!Condition) {
2733 CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2734 SupportsIFunc);
2735 return;
2738 if (!AArch64CpuInitialized) {
2739 Builder.SetInsertPoint(CurBlock, CurBlock->begin());
2740 EmitAArch64CpuInit();
2741 AArch64CpuInitialized = true;
2742 Builder.SetInsertPoint(CurBlock);
2745 llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2746 CGBuilderTy RetBuilder(*this, RetBlock);
2747 CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2748 SupportsIFunc);
2749 CurBlock = createBasicBlock("resolver_else", Resolver);
2750 Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2753 // If no default, emit an unreachable.
2754 Builder.SetInsertPoint(CurBlock);
2755 llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2756 TrapCall->setDoesNotReturn();
2757 TrapCall->setDoesNotThrow();
2758 Builder.CreateUnreachable();
2759 Builder.ClearInsertionPoint();
2762 void CodeGenFunction::EmitX86MultiVersionResolver(
2763 llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2765 bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2767 // Main function's basic block.
2768 llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2769 Builder.SetInsertPoint(CurBlock);
2770 EmitX86CpuInit();
2772 for (const MultiVersionResolverOption &RO : Options) {
2773 Builder.SetInsertPoint(CurBlock);
2774 llvm::Value *Condition = FormX86ResolverCondition(RO);
2776 // The 'default' or 'generic' case.
2777 if (!Condition) {
2778 assert(&RO == Options.end() - 1 &&
2779 "Default or Generic case must be last");
2780 CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2781 SupportsIFunc);
2782 return;
2785 llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2786 CGBuilderTy RetBuilder(*this, RetBlock);
2787 CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2788 SupportsIFunc);
2789 CurBlock = createBasicBlock("resolver_else", Resolver);
2790 Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2793 // If no generic/default, emit an unreachable.
2794 Builder.SetInsertPoint(CurBlock);
2795 llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2796 TrapCall->setDoesNotReturn();
2797 TrapCall->setDoesNotThrow();
2798 Builder.CreateUnreachable();
2799 Builder.ClearInsertionPoint();
2802 // Loc - where the diagnostic will point, i.e. where in the source code
2803 // this alignment assumption has failed.
2804 // SecondaryLoc - if present (will be present if sufficiently different from
2805 // Loc), the diagnostic will additionally point a "Note:" to this location.
2806 // It should be the location where the __attribute__((assume_aligned))
2807 // attribute was written, e.g. on the declaration that carries it.
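// For example (illustrative), given a hypothetical declaration
//   __attribute__((assume_aligned(32))) void *my_alloc(unsigned);
// Loc points at the use being checked, while SecondaryLoc points at the
// assume_aligned attribute on my_alloc.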
2808 void CodeGenFunction::emitAlignmentAssumptionCheck(
2809 llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
2810 SourceLocation SecondaryLoc, llvm::Value *Alignment,
2811 llvm::Value *OffsetValue, llvm::Value *TheCheck,
2812 llvm::Instruction *Assumption) {
2813 assert(Assumption && isa<llvm::CallInst>(Assumption) &&
2814 cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
2815 llvm::Intrinsic::getDeclaration(
2816 Builder.GetInsertBlock()->getParent()->getParent(),
2817 llvm::Intrinsic::assume) &&
2818 "Assumption should be a call to llvm.assume().");
2819 assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
2820 "Assumption should be the last instruction of the basic block, "
2821 "since the basic block is still being generated.");
2823 if (!SanOpts.has(SanitizerKind::Alignment))
2824 return;
2826 // Don't check pointers to volatile data. The behavior here is implementation-
2827 // defined.
2828 if (Ty->getPointeeType().isVolatileQualified())
2829 return;
2831 // We need to temporarily remove the assumption so we can insert the
2832 // sanitizer check before it; otherwise optimizations will drop the check.
2833 Assumption->removeFromParent();
2836 SanitizerScope SanScope(this);
2838 if (!OffsetValue)
2839 OffsetValue = Builder.getInt1(false); // no offset.
2841 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
2842 EmitCheckSourceLocation(SecondaryLoc),
2843 EmitCheckTypeDescriptor(Ty)};
2844 llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
2845 EmitCheckValue(Alignment),
2846 EmitCheckValue(OffsetValue)};
2847 EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
2848 SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
2851 // We are now in the (new, empty) "cont" basic block.
2852 // Reintroduce the assumption.
2853 Builder.Insert(Assumption);
2854 // FIXME: Assumption still has its original basic block as its parent.
2857 llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
2858 if (CGDebugInfo *DI = getDebugInfo())
2859 return DI->SourceLocToDebugLoc(Location);
2861 return llvm::DebugLoc();
2864 llvm::Value *
2865 CodeGenFunction::emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
2866 Stmt::Likelihood LH) {
2867 switch (LH) {
2868 case Stmt::LH_None:
2869 return Cond;
2870 case Stmt::LH_Likely:
2871 case Stmt::LH_Unlikely:
2872 // Don't generate llvm.expect at -O0, as the backend won't use it for
2873 // anything.
2874 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
2875 return Cond;
2876 llvm::Type *CondTy = Cond->getType();
2877 assert(CondTy->isIntegerTy(1) && "expecting condition to be a boolean");
2878 llvm::Function *FnExpect =
2879 CGM.getIntrinsic(llvm::Intrinsic::expect, CondTy);
2880 llvm::Value *ExpectedValueOfCond =
2881 llvm::ConstantInt::getBool(CondTy, LH == Stmt::LH_Likely);
2882 return Builder.CreateCall(FnExpect, {Cond, ExpectedValueOfCond},
2883 Cond->getName() + ".expval");
2885 llvm_unreachable("Unknown Likelihood");
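// For example (illustrative): emitBoolVecConversion below widens a
// 4-element mask to 8 elements with the shuffle mask
// [0, 1, 2, 3, -1, -1, -1, -1], where -1 denotes an undefined lane,
// and narrows 8 elements to 4 with [0, 1, 2, 3].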
2888 llvm::Value *CodeGenFunction::emitBoolVecConversion(llvm::Value *SrcVec,
2889 unsigned NumElementsDst,
2890 const llvm::Twine &Name) {
2891 auto *SrcTy = cast<llvm::FixedVectorType>(SrcVec->getType());
2892 unsigned NumElementsSrc = SrcTy->getNumElements();
2893 if (NumElementsSrc == NumElementsDst)
2894 return SrcVec;
2896 std::vector<int> ShuffleMask(NumElementsDst, -1);
2897 for (unsigned MaskIdx = 0;
2898 MaskIdx < std::min<>(NumElementsDst, NumElementsSrc); ++MaskIdx)
2899 ShuffleMask[MaskIdx] = MaskIdx;
2901 return Builder.CreateShuffleVector(SrcVec, ShuffleMask, Name);