//===- StackProtector.cpp - Stack Protector Insertion --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass inserts stack protectors into functions which need them. A variable
// with a random value in it is stored onto the stack before the local variables
// are allocated. Upon exiting the block, the stored value is checked. If it's
// changed, then there was some sort of violation and the program aborts.
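//
// For example, under the standard heuristic with the default ssp-buffer-size
// of 8, a function containing
//
//   char buf[64];
//
// receives a guard slot and an epilogue check, while a function with only
// scalar locals is left untouched.
//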
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/StackProtector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <optional>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "stack-protector"

STATISTIC(NumFunProtected, "Number of functions protected");
STATISTIC(NumAddrTaken, "Number of local variables that have their address"
                        " taken.");

static cl::opt<bool> EnableSelectionDAGSP("enable-selectiondag-sp",
                                          cl::init(true), cl::Hidden);
static cl::opt<bool> DisableCheckNoReturn("disable-check-noreturn-call",
                                          cl::init(false), cl::Hidden);

/// InsertStackProtectors - Insert code into the prologue and epilogue of the
/// function.
///
/// - The prologue code loads and stores the stack guard onto the stack.
/// - The epilogue checks the value stored in the prologue against the original
///   value. It calls __stack_chk_fail if they differ.
static bool InsertStackProtectors(const TargetMachine *TM, Function *F,
                                  DomTreeUpdater *DTU, bool &HasPrologue,
                                  bool &HasIRCheck);

/// CreateFailBB - Create a basic block to jump to when the stack protector
/// check fails.
static BasicBlock *CreateFailBB(Function *F, const Triple &Trip);

bool SSPLayoutInfo::shouldEmitSDCheck(const BasicBlock &BB) const {
  return HasPrologue && !HasIRCheck && isa<ReturnInst>(BB.getTerminator());
}

void SSPLayoutInfo::copyToMachineFrameInfo(MachineFrameInfo &MFI) const {
  if (Layout.empty())
    return;

  for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
    if (MFI.isDeadObjectIndex(I))
      continue;

    const AllocaInst *AI = MFI.getObjectAllocation(I);
    if (!AI)
      continue;

    SSPLayoutMap::const_iterator LI = Layout.find(AI);
    if (LI == Layout.end())
      continue;

    MFI.setObjectSSPLayout(I, LI->second);
  }
}

SSPLayoutInfo SSPLayoutAnalysis::run(Function &F,
                                     FunctionAnalysisManager &FAM) {
  SSPLayoutInfo Info;
  Info.RequireStackProtector =
      SSPLayoutAnalysis::requiresStackProtector(&F, &Info.Layout);
  Info.SSPBufferSize = F.getFnAttributeAsParsedInteger(
      "stack-protector-buffer-size", SSPLayoutInfo::DefaultSSPBufferSize);
  return Info;
}

AnalysisKey SSPLayoutAnalysis::Key;

PreservedAnalyses StackProtectorPass::run(Function &F,
                                          FunctionAnalysisManager &FAM) {
  auto &Info = FAM.getResult<SSPLayoutAnalysis>(F);
  auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(F);
  DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Lazy);

  if (!Info.RequireStackProtector)
    return PreservedAnalyses::all();

  // TODO(etienneb): Functions with funclets are not correctly supported now.
  // Do nothing if this is funclet-based personality.
  if (F.hasPersonalityFn()) {
    EHPersonality Personality = classifyEHPersonality(F.getPersonalityFn());
    if (isFuncletEHPersonality(Personality))
      return PreservedAnalyses::all();
  }

  ++NumFunProtected;
  bool Changed = InsertStackProtectors(TM, &F, DT ? &DTU : nullptr,
                                       Info.HasPrologue, Info.HasIRCheck);
#ifdef EXPENSIVE_CHECKS
  assert((!DT || DT->verify(DominatorTree::VerificationLevel::Full)) &&
         "Failed to maintain validity of domtree!");
#endif

  if (!Changed)
    return PreservedAnalyses::all();
  PreservedAnalyses PA;
  PA.preserve<SSPLayoutAnalysis>();
  PA.preserve<DominatorTreeAnalysis>();
  return PA;
}

char StackProtector::ID = 0;

StackProtector::StackProtector() : FunctionPass(ID) {
  initializeStackProtectorPass(*PassRegistry::getPassRegistry());
}

INITIALIZE_PASS_BEGIN(StackProtector, DEBUG_TYPE,
                      "Insert stack protectors", false, true)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(StackProtector, DEBUG_TYPE,
                    "Insert stack protectors", false, true)

FunctionPass *llvm::createStackProtectorPass() { return new StackProtector(); }

void StackProtector::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  AU.addPreserved<DominatorTreeWrapperPass>();
}

bool StackProtector::runOnFunction(Function &Fn) {
  F = &Fn;
  M = F->getParent();
  if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
    DTU.emplace(DTWP->getDomTree(), DomTreeUpdater::UpdateStrategy::Lazy);
  TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
  LayoutInfo.HasPrologue = false;
  LayoutInfo.HasIRCheck = false;

  LayoutInfo.SSPBufferSize = Fn.getFnAttributeAsParsedInteger(
      "stack-protector-buffer-size", SSPLayoutInfo::DefaultSSPBufferSize);
  if (!requiresStackProtector(F, &LayoutInfo.Layout))
    return false;

  // TODO(etienneb): Functions with funclets are not correctly supported now.
  // Do nothing if this is funclet-based personality.
  if (Fn.hasPersonalityFn()) {
    EHPersonality Personality = classifyEHPersonality(Fn.getPersonalityFn());
    if (isFuncletEHPersonality(Personality))
      return false;
  }

  ++NumFunProtected;
  bool Changed =
      InsertStackProtectors(TM, F, DTU ? &*DTU : nullptr,
                            LayoutInfo.HasPrologue, LayoutInfo.HasIRCheck);
#ifdef EXPENSIVE_CHECKS
  assert((!DTU ||
          DTU->getDomTree().verify(DominatorTree::VerificationLevel::Full)) &&
         "Failed to maintain validity of domtree!");
#endif
  DTU.reset();
  return Changed;
}

/// \param [out] IsLarge is set to true if a protectable array is found and
/// it is "large" ( >= ssp-buffer-size). In the case of a structure with
/// multiple arrays, this gets set if any of them is large.
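///
/// For example, with the default ssp-buffer-size of 8:
///   [16 x i8]  - protectable and large under both heuristics
///   [4 x i8]   - protectable (small) in strong mode only
///   [4 x i32]  - 16 bytes but not a character array, so it qualifies only in
///                strong mode, or at the top level on Darwin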
static bool ContainsProtectableArray(Type *Ty, Module *M, unsigned SSPBufferSize,
                                     bool &IsLarge, bool Strong,
                                     bool InStruct = false) {
  if (!Ty)
    return false;
  if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
    if (!AT->getElementType()->isIntegerTy(8)) {
      // If we're on a non-Darwin platform or we're inside of a structure, don't
      // add stack protectors unless the array is a character array.
      // However, in strong mode any array, regardless of type and size,
      // triggers a protector.
      if (!Strong && (InStruct || !Triple(M->getTargetTriple()).isOSDarwin()))
        return false;
    }

    // If an array has more than SSPBufferSize bytes of allocated space, then we
    // emit stack protectors.
    if (SSPBufferSize <= M->getDataLayout().getTypeAllocSize(AT)) {
      IsLarge = true;
      return true;
    }

    if (Strong)
      // Require a protector for all arrays in strong mode
      return true;
  }

  const StructType *ST = dyn_cast<StructType>(Ty);
  if (!ST)
    return false;

  bool NeedsProtector = false;
  for (Type *ET : ST->elements())
    if (ContainsProtectableArray(ET, M, SSPBufferSize, IsLarge, Strong, true)) {
      // If the element is a protectable array and is large (>= SSPBufferSize)
      // then we are done. If the protectable array is not large, then
      // keep looking in case a subsequent element is a large array.
      if (IsLarge)
        return true;

      NeedsProtector = true;
    }

  return NeedsProtector;
}

/// Check whether a stack allocation has its address taken.
static bool HasAddressTaken(const Instruction *AI, TypeSize AllocSize,
                            Module *M,
                            SmallPtrSet<const PHINode *, 16> &VisitedPHIs) {
  const DataLayout &DL = M->getDataLayout();
  for (const User *U : AI->users()) {
    const auto *I = cast<Instruction>(U);
    // If this instruction accesses memory make sure it doesn't access beyond
    // the bounds of the allocated object.
    std::optional<MemoryLocation> MemLoc = MemoryLocation::getOrNone(I);
    if (MemLoc && MemLoc->Size.hasValue() &&
        !TypeSize::isKnownGE(AllocSize, MemLoc->Size.getValue()))
      return true;
    switch (I->getOpcode()) {
    case Instruction::Store:
      if (AI == cast<StoreInst>(I)->getValueOperand())
        return true;
      break;
    case Instruction::AtomicCmpXchg:
      // cmpxchg conceptually includes both a load and store from the same
      // location. So, like store, the value being stored is what matters.
      if (AI == cast<AtomicCmpXchgInst>(I)->getNewValOperand())
        return true;
      break;
    case Instruction::PtrToInt:
      if (AI == cast<PtrToIntInst>(I)->getOperand(0))
        return true;
      break;
    case Instruction::Call: {
      // Ignore intrinsics that do not become real instructions.
      // TODO: Narrow this to intrinsics that have store-like effects.
      const auto *CI = cast<CallInst>(I);
      if (!CI->isDebugOrPseudoInst() && !CI->isLifetimeStartOrEnd())
        return true;
      break;
    }
    case Instruction::Invoke:
      return true;
    case Instruction::GetElementPtr: {
      // If the GEP offset is out-of-bounds, or is non-constant and so has to be
      // assumed to be potentially out-of-bounds, then any memory access that
      // would use it could also be out-of-bounds meaning stack protection is
      // required.
      const GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
      unsigned IndexSize = DL.getIndexTypeSizeInBits(I->getType());
      APInt Offset(IndexSize, 0);
      if (!GEP->accumulateConstantOffset(DL, Offset))
        return true;
      TypeSize OffsetSize = TypeSize::getFixed(Offset.getLimitedValue());
      if (!TypeSize::isKnownGT(AllocSize, OffsetSize))
        return true;
      // Adjust AllocSize to be the space remaining after this offset.
      // We can't subtract a fixed size from a scalable one, so in that case
      // assume the scalable value is of minimum size.
      TypeSize NewAllocSize =
          TypeSize::getFixed(AllocSize.getKnownMinValue()) - OffsetSize;
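      // For example, for a 64-byte alloca and a constant GEP offset of 16,
      // NewAllocSize is 48 bytes; the recursion below then flags any use of
      // the GEP that could touch more than those remaining 48 bytes.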
      if (HasAddressTaken(I, NewAllocSize, M, VisitedPHIs))
        return true;
      break;
    }
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::AddrSpaceCast:
      if (HasAddressTaken(I, AllocSize, M, VisitedPHIs))
        return true;
      break;
    case Instruction::PHI: {
      // Keep track of what PHI nodes we have already visited to ensure
      // they are only visited once.
      const auto *PN = cast<PHINode>(I);
      if (VisitedPHIs.insert(PN).second)
        if (HasAddressTaken(PN, AllocSize, M, VisitedPHIs))
          return true;
      break;
    }
    case Instruction::Load:
    case Instruction::AtomicRMW:
    case Instruction::Ret:
      // These instructions take an address operand, but have load-like or
      // other innocuous behavior that should not trigger a stack protector.
      // atomicrmw conceptually has both load and store semantics, but the
      // value being stored must be integer; so if a pointer is being stored,
      // we'll catch it in the PtrToInt case above.
      break;
    default:
      // Conservatively return true for any instruction that takes an address
      // operand, but is not handled above.
      return true;
    }
  }
  return false;
}

/// Search for the first call to the llvm.stackprotector intrinsic and return it
/// if present.
static const CallInst *findStackProtectorIntrinsic(Function &F) {
  for (const BasicBlock &BB : F)
    for (const Instruction &I : BB)
      if (const auto *II = dyn_cast<IntrinsicInst>(&I))
        if (II->getIntrinsicID() == Intrinsic::stackprotector)
          return II;
  return nullptr;
}

/// Check whether or not this function needs a stack protector based
/// upon the stack protector level.
///
/// We use two heuristics: a standard (ssp) and strong (sspstrong).
/// The standard heuristic will add a guard variable to functions that
/// call alloca with either a variable size or a size >= SSPBufferSize,
/// functions with character buffers larger than SSPBufferSize, and functions
/// with aggregates containing character buffers larger than SSPBufferSize. The
/// strong heuristic will add a guard variable to functions that call alloca
/// regardless of size, functions with any buffer regardless of type and size,
/// functions with aggregates that contain any buffer regardless of type and
/// size, and functions that contain stack-based variables that have had their
/// address taken.
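///
/// For illustration, in terms of the corresponding IR function attributes
/// (the function names are hypothetical):
///
///   define void @a() ssp       { ... }  ; guarded only if a qualifying
///                                       ; buffer or alloca is found
///   define void @b() sspstrong { ... }  ; guarded for any array, alloca, or
///                                       ; address-taken local
///   define void @c() sspreq    { ... }  ; always guarded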
bool SSPLayoutAnalysis::requiresStackProtector(Function *F,
                                               SSPLayoutMap *Layout) {
  Module *M = F->getParent();
  bool Strong = false;
  bool NeedsProtector = false;

  // The set of PHI nodes visited when determining if a variable's reference has
  // been taken. This set is maintained to ensure we don't visit the same PHI
  // node multiple times.
  SmallPtrSet<const PHINode *, 16> VisitedPHIs;

  unsigned SSPBufferSize = F->getFnAttributeAsParsedInteger(
      "stack-protector-buffer-size", SSPLayoutInfo::DefaultSSPBufferSize);

  if (F->hasFnAttribute(Attribute::SafeStack))
    return false;

  // We are constructing the OptimizationRemarkEmitter on the fly rather than
  // using the analysis pass to avoid building DominatorTree and LoopInfo which
  // are not available this late in the IR pipeline.
  OptimizationRemarkEmitter ORE(F);

  if (F->hasFnAttribute(Attribute::StackProtectReq)) {
    if (!Layout)
      return true;
    ORE.emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "StackProtectorRequested", F)
             << "Stack protection applied to function "
             << ore::NV("Function", F)
             << " due to a function attribute or command-line switch";
    });
    NeedsProtector = true;
    Strong = true; // Use the same heuristic as strong to determine SSPLayout
  } else if (F->hasFnAttribute(Attribute::StackProtectStrong))
    Strong = true;
  else if (!F->hasFnAttribute(Attribute::StackProtect))
    return false;

  for (const BasicBlock &BB : *F) {
    for (const Instruction &I : BB) {
      if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
        if (AI->isArrayAllocation()) {
          auto RemarkBuilder = [&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorAllocaOrArray",
                                      &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to a call to alloca or use of a variable length "
                      "array";
          };
          if (const auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
            if (CI->getLimitedValue(SSPBufferSize) >= SSPBufferSize) {
              // A call to alloca with size >= SSPBufferSize requires
              // stack protectors.
              if (!Layout)
                return true;
              Layout->insert(
                  std::make_pair(AI, MachineFrameInfo::SSPLK_LargeArray));
              ORE.emit(RemarkBuilder);
              NeedsProtector = true;
            } else if (Strong) {
              // Require protectors for all alloca calls in strong mode.
              if (!Layout)
                return true;
              Layout->insert(
                  std::make_pair(AI, MachineFrameInfo::SSPLK_SmallArray));
              ORE.emit(RemarkBuilder);
              NeedsProtector = true;
            }
          } else {
            // A call to alloca with a variable size requires protectors.
            if (!Layout)
              return true;
            Layout->insert(
                std::make_pair(AI, MachineFrameInfo::SSPLK_LargeArray));
            ORE.emit(RemarkBuilder);
            NeedsProtector = true;
          }
          continue;
        }

        bool IsLarge = false;
        if (ContainsProtectableArray(AI->getAllocatedType(), M, SSPBufferSize,
                                     IsLarge, Strong, false)) {
          if (!Layout)
            return true;
          Layout->insert(std::make_pair(
              AI, IsLarge ? MachineFrameInfo::SSPLK_LargeArray
                          : MachineFrameInfo::SSPLK_SmallArray));
          ORE.emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorBuffer", &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to a stack allocated buffer or struct containing a "
                      "buffer";
          });
          NeedsProtector = true;
          continue;
        }

        if (Strong &&
            HasAddressTaken(
                AI, M->getDataLayout().getTypeAllocSize(AI->getAllocatedType()),
                M, VisitedPHIs)) {
          ++NumAddrTaken;
          if (!Layout)
            return true;
          Layout->insert(std::make_pair(AI, MachineFrameInfo::SSPLK_AddrOf));
          ORE.emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorAddressTaken",
                                      &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to the address of a local variable being taken";
          });
          NeedsProtector = true;
        }
        // Clear any PHIs that we visited, to make sure we examine all uses of
        // any subsequent allocas that we look at.
        VisitedPHIs.clear();
      }
    }
  }

  return NeedsProtector;
}

/// Create a stack guard loading and populate whether SelectionDAG SSP is
/// supported.
static Value *getStackGuard(const TargetLoweringBase *TLI, Module *M,
                            IRBuilder<> &B,
                            bool *SupportsSelectionDAGSP = nullptr) {
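  // On many targets the IR-level guard is a TLS slot (for example %fs:0x28 on
  // x86-64 Linux); in that case the volatile load created below is all the
  // prologue needs, and no SelectionDAG fallback is required.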
  Value *Guard = TLI->getIRStackGuard(B);
  StringRef GuardMode = M->getStackProtectorGuard();
  if ((GuardMode == "tls" || GuardMode.empty()) && Guard)
    return B.CreateLoad(B.getPtrTy(), Guard, true, "StackGuard");

  // Use SelectionDAG SSP handling, since there isn't an IR guard.
  //
  // This is somewhat unusual, since we optionally report here whether
  // SelectionDAG SP should be performed. The reason is that the bit is
  // strictly defined as !TLI->getIRStackGuard(B), where getIRStackGuard is
  // also mutating. There is no way to get this bit without mutating the IR,
  // so it has to be computed at exactly this point.
  //
  // We could have defined a new function TLI::supportsSelectionDAGSP(), but
  // that would put more burden on the backends' overriding work, especially
  // since it conveys the same information getIRStackGuard() already gives.
  if (SupportsSelectionDAGSP)
    *SupportsSelectionDAGSP = true;
  TLI->insertSSPDeclarations(*M);
  return B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackguard));
}

/// Insert code into the entry block that stores the stack guard
/// variable onto the stack:
///
///   entry:
///     StackGuardSlot = alloca i8*
///     StackGuard = <stack guard>
///     call void @llvm.stackprotector(StackGuard, StackGuardSlot)
///
/// Returns true if the platform/triple supports the stackprotectorcreate pseudo
/// node.
static bool CreatePrologue(Function *F, Module *M, Instruction *CheckLoc,
                           const TargetLoweringBase *TLI, AllocaInst *&AI) {
  bool SupportsSelectionDAGSP = false;
  IRBuilder<> B(&F->getEntryBlock().front());
  PointerType *PtrTy = PointerType::getUnqual(CheckLoc->getContext());
  AI = B.CreateAlloca(PtrTy, nullptr, "StackGuardSlot");

  Value *GuardSlot = getStackGuard(TLI, M, B, &SupportsSelectionDAGSP);
  B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackprotector),
               {GuardSlot, AI});
  return SupportsSelectionDAGSP;
}

bool InsertStackProtectors(const TargetMachine *TM, Function *F,
                           DomTreeUpdater *DTU, bool &HasPrologue,
                           bool &HasIRCheck) {
  auto *M = F->getParent();
  auto *TLI = TM->getSubtargetImpl(*F)->getTargetLowering();

  // If the target wants to XOR the frame pointer into the guard value, it's
  // impossible to emit the check in IR, so the target *must* support stack
  // protection in SDAG.
  bool SupportsSelectionDAGSP =
      TLI->useStackGuardXorFP() ||
      (EnableSelectionDAGSP && !TM->Options.EnableFastISel);
  AllocaInst *AI = nullptr; // Place on stack that stores the stack guard.
  BasicBlock *FailBB = nullptr;

  for (BasicBlock &BB : llvm::make_early_inc_range(*F)) {
    // This is stack protector auto generated check BB, skip it.
    if (&BB == FailBB)
      continue;

    Instruction *CheckLoc = dyn_cast<ReturnInst>(BB.getTerminator());
    if (!CheckLoc && !DisableCheckNoReturn)
      for (auto &Inst : BB)
        if (auto *CB = dyn_cast<CallBase>(&Inst))
          // Do stack check before noreturn calls that aren't nounwind (e.g:
          // __cxa_throw).
          if (CB->doesNotReturn() && !CB->doesNotThrow()) {
            CheckLoc = CB;
            break;
          }

    if (!CheckLoc)
      continue;

    // Generate prologue instrumentation if not already generated.
    if (!HasPrologue) {
      HasPrologue = true;
      SupportsSelectionDAGSP &= CreatePrologue(F, M, CheckLoc, TLI, AI);
    }

    // SelectionDAG based code generation. Nothing else needs to be done here.
    // The epilogue instrumentation is postponed to SelectionDAG.
    if (SupportsSelectionDAGSP)
      break;

    // Find the stack guard slot if the prologue was not created by this pass
    // itself via a previous call to CreatePrologue().
    if (!AI) {
      const CallInst *SPCall = findStackProtectorIntrinsic(*F);
      assert(SPCall && "Call to llvm.stackprotector is missing");
      AI = cast<AllocaInst>(SPCall->getArgOperand(1));
    }

    // Set HasIRCheck to true, so that SelectionDAG will not generate its own
    // version. SelectionDAG calls 'shouldEmitSDCheck' to check whether
    // instrumentation has already been generated.
    HasIRCheck = true;

    // If we're instrumenting a block with a tail call, the check has to be
    // inserted before the call rather than between it and the return. The
    // verifier guarantees that a tail call is either directly before the
    // return or with a single correct bitcast of the return value in between so
    // we don't need to worry about many situations here.
    Instruction *Prev = CheckLoc->getPrevNonDebugInstruction();
    if (Prev && isa<CallInst>(Prev) && cast<CallInst>(Prev)->isTailCall())
      CheckLoc = Prev;
    else if (Prev) {
      Prev = Prev->getPrevNonDebugInstruction();
      if (Prev && isa<CallInst>(Prev) && cast<CallInst>(Prev)->isTailCall())
        CheckLoc = Prev;
    }

    // Generate epilogue instrumentation. The epilogue instrumentation can be
    // function-based or inlined depending on which mechanism the target is
    // providing.
    if (Function *GuardCheck = TLI->getSSPStackGuardCheck(*M)) {
      // Generate the function-based epilogue instrumentation.
      // The target provides a guard check function, generate a call to it.
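      // (For example, Windows targets supply __security_check_cookie as the
      // guard check function here.)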
      IRBuilder<> B(CheckLoc);
      LoadInst *Guard = B.CreateLoad(B.getPtrTy(), AI, true, "Guard");
      CallInst *Call = B.CreateCall(GuardCheck, {Guard});
      Call->setAttributes(GuardCheck->getAttributes());
      Call->setCallingConv(GuardCheck->getCallingConv());
    } else {
      // Generate the epilogue with inline instrumentation.
      // If we do not support SelectionDAG based calls, generate IR level
      // calls.
      //
      // For each block with a return instruction, convert this:
      //
      //   return:
      //     ...
      //     ret ...
      //
      // into this:
      //
      //   return:
      //     ...
      //     %1 = <stack guard>
      //     %2 = load StackGuardSlot
      //     %3 = icmp ne i1 %1, %2
      //     br i1 %3, label %CallStackCheckFailBlk, label %SP_return
      //
      //   SP_return:
      //     ret ...
      //
      //   CallStackCheckFailBlk:
      //     call void @__stack_chk_fail()
      //     unreachable
      //
      // Create the FailBB. We duplicate the BB every time since the MI tail
      // merge pass will merge together all of the various BB into one including
      // fail BB generated by the stack protector pseudo instruction.
      if (!FailBB)
        FailBB = CreateFailBB(F, TM->getTargetTriple());

      IRBuilder<> B(CheckLoc);
      Value *Guard = getStackGuard(TLI, M, B);
      LoadInst *LI2 = B.CreateLoad(B.getPtrTy(), AI, true);
      auto *Cmp = cast<ICmpInst>(B.CreateICmpNE(Guard, LI2));
      auto SuccessProb =
          BranchProbabilityInfo::getBranchProbStackProtector(true);
      auto FailureProb =
          BranchProbabilityInfo::getBranchProbStackProtector(false);
      MDNode *Weights = MDBuilder(F->getContext())
                            .createBranchWeights(FailureProb.getNumerator(),
                                                 SuccessProb.getNumerator());

      SplitBlockAndInsertIfThen(Cmp, CheckLoc,
                                /*Unreachable=*/false, Weights, DTU,
                                /*LI=*/nullptr, /*ThenBlock=*/FailBB);

      auto *BI = cast<BranchInst>(Cmp->getParent()->getTerminator());
      BasicBlock *NewBB = BI->getSuccessor(1);
      NewBB->setName("SP_return");
      NewBB->moveAfter(&BB);
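
      // SplitBlockAndInsertIfThen wires FailBB in as the "then" successor.
      // Inverting the compare and swapping the successors below keeps the
      // semantics but makes the guard-intact path successor 0, so FailBB is
      // taken only when the guard value has changed.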
      Cmp->setPredicate(Cmp->getInversePredicate());
      BI->swapSuccessors();
    }
  }

  // Return if we didn't modify any basic blocks. i.e., there are no return
  // statements in the function.
  return HasPrologue;
}

BasicBlock *CreateFailBB(Function *F, const Triple &Trip) {
  auto *M = F->getParent();
  LLVMContext &Context = F->getContext();
  BasicBlock *FailBB = BasicBlock::Create(Context, "CallStackCheckFailBlk", F);
  IRBuilder<> B(FailBB);
  if (F->getSubprogram())
    B.SetCurrentDebugLocation(
        DILocation::get(Context, 0, 0, F->getSubprogram()));
  FunctionCallee StackChkFail;
  SmallVector<Value *, 1> Args;
  if (Trip.isOSOpenBSD()) {
    StackChkFail = M->getOrInsertFunction("__stack_smash_handler",
                                          Type::getVoidTy(Context),
                                          PointerType::getUnqual(Context));
    Args.push_back(B.CreateGlobalStringPtr(F->getName(), "SSH"));
  } else {
    StackChkFail =
        M->getOrInsertFunction("__stack_chk_fail", Type::getVoidTy(Context));
  }
  cast<Function>(StackChkFail.getCallee())->addFnAttr(Attribute::NoReturn);
  B.CreateCall(StackChkFail, Args);
  B.CreateUnreachable();
  return FailBB;
}