//===- StackProtector.cpp - Stack Protector Insertion --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass inserts stack protectors into functions which need them. A variable
// with a random value in it is stored onto the stack before the local variables
// are allocated. Upon exiting the block, the stored value is checked. If it's
// changed, then there was some sort of violation and the program aborts.
//
//===----------------------------------------------------------------------===//
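//
// Illustrative sketch of the instrumentation (simplified; the actual guard
// value, symbol names, and failure handler are chosen by the target hooks used
// below):
//
//   entry:
//     %StackGuardSlot = alloca i8*
//     %StackGuard = <stack guard>
//     call void @llvm.stackprotector(%StackGuard, %StackGuardSlot)
//     ...
//   return:
//     ; reload the slot, compare it with the stack guard, and branch to a
//     ; block that calls @__stack_chk_fail() when the values differ.
//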
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include <optional>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "stack-protector"
STATISTIC(NumFunProtected, "Number of functions protected");
STATISTIC(NumAddrTaken, "Number of local variables that have their address"
                        " taken.");
static cl::opt<bool> EnableSelectionDAGSP("enable-selectiondag-sp",
                                          cl::init(true), cl::Hidden);
static cl::opt<bool> DisableCheckNoReturn("disable-check-noreturn-call",
                                          cl::init(false), cl::Hidden);
char StackProtector::ID = 0;

StackProtector::StackProtector() : FunctionPass(ID) {
  initializeStackProtectorPass(*PassRegistry::getPassRegistry());
}
INITIALIZE_PASS_BEGIN(StackProtector, DEBUG_TYPE,
                      "Insert stack protectors", false, true)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_END(StackProtector, DEBUG_TYPE,
                    "Insert stack protectors", false, true)
FunctionPass *llvm::createStackProtectorPass() { return new StackProtector(); }
void StackProtector::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  AU.addPreserved<DominatorTreeWrapperPass>();
}
bool StackProtector::runOnFunction(Function &Fn) {
  F = &Fn;
  M = F->getParent();

  if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
    DTU.emplace(DTWP->getDomTree(), DomTreeUpdater::UpdateStrategy::Lazy);
  TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
  Trip = TM->getTargetTriple();
  TLI = TM->getSubtargetImpl(Fn)->getTargetLowering();

  SSPBufferSize = Fn.getFnAttributeAsParsedInteger(
      "stack-protector-buffer-size", DefaultSSPBufferSize);
  if (!requiresStackProtector(F, &Layout))
    return false;

  // TODO(etienneb): Functions with funclets are not correctly supported now.
  // Do nothing if this is funclet-based personality.
  if (Fn.hasPersonalityFn()) {
    EHPersonality Personality = classifyEHPersonality(Fn.getPersonalityFn());
    if (isFuncletEHPersonality(Personality))
      return false;
  }

  ++NumFunProtected;
  bool Changed = InsertStackProtectors();
#ifdef EXPENSIVE_CHECKS
  assert((!DTU ||
          DTU->getDomTree().verify(DominatorTree::VerificationLevel::Full)) &&
         "Failed to maintain validity of domtree!");
#endif
  return Changed;
}
/// \param [out] IsLarge is set to true if a protectable array is found and
/// it is "large" ( >= ssp-buffer-size). In the case of a structure with
/// multiple arrays, this gets set if any of them is large.
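///
/// For example (illustrative, assuming the default ssp-buffer-size of 8):
/// [16 x i8] is a large protectable array, [4 x i8] only triggers a protector
/// in strong mode, and a non-character array such as [4 x i32] requires either
/// strong mode or, outside of a struct, a Darwin target.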
static bool ContainsProtectableArray(Type *Ty, Module *M, unsigned SSPBufferSize,
                                     bool &IsLarge, bool Strong,
                                     bool InStruct) {
  if (!Ty)
    return false;
  if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
    if (!AT->getElementType()->isIntegerTy(8)) {
      // If we're on a non-Darwin platform or we're inside of a structure, don't
      // add stack protectors unless the array is a character array.
      // However, in strong mode any array, regardless of type and size,
      // triggers a protector.
      if (!Strong && (InStruct || !Triple(M->getTargetTriple()).isOSDarwin()))
        return false;
    }

    // If an array has more than SSPBufferSize bytes of allocated space, then we
    // emit stack protectors.
    if (SSPBufferSize <= M->getDataLayout().getTypeAllocSize(AT)) {
      IsLarge = true;
      return true;
    }

    if (Strong)
      // Require a protector for all arrays in strong mode
      return true;
  }

  const StructType *ST = dyn_cast<StructType>(Ty);
  if (!ST)
    return false;

  bool NeedsProtector = false;
  for (Type *ET : ST->elements())
    if (ContainsProtectableArray(ET, M, SSPBufferSize, IsLarge, Strong, true)) {
      // If the element is a protectable array and is large (>= SSPBufferSize)
      // then we are done. If the protectable array is not large, then
      // keep looking in case a subsequent element is a large array.
      if (IsLarge)
        return true;
      NeedsProtector = true;
    }

  return NeedsProtector;
}
/// Check whether a stack allocation has its address taken.
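/// For example (illustrative): storing the alloca's address to memory, passing
/// it to a non-intrinsic call, or converting it with ptrtoint counts as taking
/// the address, while merely loading through the pointer does not.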
static bool HasAddressTaken(const Instruction *AI, TypeSize AllocSize,
                            Module *M,
                            SmallPtrSet<const PHINode *, 16> &VisitedPHIs) {
  const DataLayout &DL = M->getDataLayout();
  for (const User *U : AI->users()) {
    const auto *I = cast<Instruction>(U);
    // If this instruction accesses memory make sure it doesn't access beyond
    // the bounds of the allocated object.
    std::optional<MemoryLocation> MemLoc = MemoryLocation::getOrNone(I);
    if (MemLoc && MemLoc->Size.hasValue() &&
        !TypeSize::isKnownGE(AllocSize, MemLoc->Size.getValue()))
      return true;
    switch (I->getOpcode()) {
    case Instruction::Store:
      if (AI == cast<StoreInst>(I)->getValueOperand())
        return true;
      break;
    case Instruction::AtomicCmpXchg:
      // cmpxchg conceptually includes both a load and store from the same
      // location. So, like store, the value being stored is what matters.
      if (AI == cast<AtomicCmpXchgInst>(I)->getNewValOperand())
        return true;
      break;
    case Instruction::PtrToInt:
      if (AI == cast<PtrToIntInst>(I)->getOperand(0))
        return true;
      break;
    case Instruction::Call: {
      // Ignore intrinsics that do not become real instructions.
      // TODO: Narrow this to intrinsics that have store-like effects.
      const auto *CI = cast<CallInst>(I);
      if (!CI->isDebugOrPseudoInst() && !CI->isLifetimeStartOrEnd())
        return true;
      break;
    }
    case Instruction::Invoke:
      return true;
    case Instruction::GetElementPtr: {
      // If the GEP offset is out-of-bounds, or is non-constant and so has to be
      // assumed to be potentially out-of-bounds, then any memory access that
      // would use it could also be out-of-bounds meaning stack protection is
      // required.
      const GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
      unsigned IndexSize = DL.getIndexTypeSizeInBits(I->getType());
      APInt Offset(IndexSize, 0);
      if (!GEP->accumulateConstantOffset(DL, Offset))
        return true;
      TypeSize OffsetSize = TypeSize::Fixed(Offset.getLimitedValue());
      if (!TypeSize::isKnownGT(AllocSize, OffsetSize))
        return true;
      // Adjust AllocSize to be the space remaining after this offset.
      // We can't subtract a fixed size from a scalable one, so in that case
      // assume the scalable value is of minimum size.
      TypeSize NewAllocSize =
          TypeSize::Fixed(AllocSize.getKnownMinValue()) - OffsetSize;
      if (HasAddressTaken(I, NewAllocSize, M, VisitedPHIs))
        return true;
      break;
    }
    case Instruction::BitCast:
    case Instruction::Select:
    case Instruction::AddrSpaceCast:
      if (HasAddressTaken(I, AllocSize, M, VisitedPHIs))
        return true;
      break;
    case Instruction::PHI: {
      // Keep track of what PHI nodes we have already visited to ensure
      // they are only visited once.
      const auto *PN = cast<PHINode>(I);
      if (VisitedPHIs.insert(PN).second)
        if (HasAddressTaken(PN, AllocSize, M, VisitedPHIs))
          return true;
      break;
    }
    case Instruction::Load:
    case Instruction::AtomicRMW:
    case Instruction::Ret:
      // These instructions take an address operand, but have load-like or
      // other innocuous behavior that should not trigger a stack protector.
      // atomicrmw conceptually has both load and store semantics, but the
      // value being stored must be integer; so if a pointer is being stored,
      // we'll catch it in the PtrToInt case above.
      break;
    default:
      // Conservatively return true for any instruction that takes an address
      // operand, but is not handled above.
      return true;
    }
  }
  return false;
}
/// Search for the first call to the llvm.stackprotector intrinsic and return it
/// if present.
static const CallInst *findStackProtectorIntrinsic(Function &F) {
  for (const BasicBlock &BB : F)
    for (const Instruction &I : BB)
      if (const auto *II = dyn_cast<IntrinsicInst>(&I))
        if (II->getIntrinsicID() == Intrinsic::stackprotector)
          return II;
  return nullptr;
}
/// Check whether or not this function needs a stack protector based
/// upon the stack protector level.
///
/// We use two heuristics: a standard (ssp) and strong (sspstrong).
/// The standard heuristic adds a guard variable to functions that call alloca
/// with either a variable size or a size >= SSPBufferSize, to functions with
/// character buffers larger than SSPBufferSize, and to functions with
/// aggregates containing character buffers larger than SSPBufferSize. The
/// strong heuristic adds a guard variable to functions that call alloca
/// regardless of size, to functions with any buffer regardless of type and
/// size, to functions with aggregates that contain any buffer regardless of
/// type and size, and to functions that contain stack-based variables that
/// have had their address taken.
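///
/// For example (illustrative, with the default SSPBufferSize of 8): a function
/// containing "char buf[64]" gets a guard under both heuristics; one containing
/// only "int x" gets none under the standard heuristic, but does under the
/// strong heuristic if the address of "x" escapes.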
bool StackProtector::requiresStackProtector(Function *F, SSPLayoutMap *Layout) {
  Module *M = F->getParent();
  bool Strong = false;
  bool NeedsProtector = false;

  // The set of PHI nodes visited when determining if a variable's reference has
  // been taken. This set is maintained to ensure we don't visit the same PHI
  // node multiple times.
  SmallPtrSet<const PHINode *, 16> VisitedPHIs;

  unsigned SSPBufferSize = F->getFnAttributeAsParsedInteger(
      "stack-protector-buffer-size", DefaultSSPBufferSize);

  if (F->hasFnAttribute(Attribute::SafeStack))
    return false;

  // We are constructing the OptimizationRemarkEmitter on the fly rather than
  // using the analysis pass to avoid building DominatorTree and LoopInfo which
  // are not available this late in the IR pipeline.
  OptimizationRemarkEmitter ORE(F);

  if (F->hasFnAttribute(Attribute::StackProtectReq)) {
    if (!Layout)
      return true;
    ORE.emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "StackProtectorRequested", F)
             << "Stack protection applied to function "
             << ore::NV("Function", F)
             << " due to a function attribute or command-line switch";
    });
    NeedsProtector = true;
    Strong = true; // Use the same heuristic as strong to determine SSPLayout
  } else if (F->hasFnAttribute(Attribute::StackProtectStrong))
    Strong = true;
  else if (!F->hasFnAttribute(Attribute::StackProtect))
    return false;

  for (const BasicBlock &BB : *F) {
    for (const Instruction &I : BB) {
      if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
        if (AI->isArrayAllocation()) {
          auto RemarkBuilder = [&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorAllocaOrArray",
                                      &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to a call to alloca or use of a variable length "
                      "array";
          };
          if (const auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
            if (CI->getLimitedValue(SSPBufferSize) >= SSPBufferSize) {
              // A call to alloca with size >= SSPBufferSize requires
              // protectors.
              if (!Layout)
                return true;
              Layout->insert(
                  std::make_pair(AI, MachineFrameInfo::SSPLK_LargeArray));
              ORE.emit(RemarkBuilder);
              NeedsProtector = true;
            } else if (Strong) {
              // Require protectors for all alloca calls in strong mode.
              if (!Layout)
                return true;
              Layout->insert(
                  std::make_pair(AI, MachineFrameInfo::SSPLK_SmallArray));
              ORE.emit(RemarkBuilder);
              NeedsProtector = true;
            }
          } else {
            // A call to alloca with a variable size requires protectors.
            if (!Layout)
              return true;
            Layout->insert(
                std::make_pair(AI, MachineFrameInfo::SSPLK_LargeArray));
            ORE.emit(RemarkBuilder);
            NeedsProtector = true;
          }
          continue;
        }

        bool IsLarge = false;
        if (ContainsProtectableArray(AI->getAllocatedType(), M, SSPBufferSize,
                                     IsLarge, Strong, false)) {
          if (!Layout)
            return true;
          Layout->insert(std::make_pair(
              AI, IsLarge ? MachineFrameInfo::SSPLK_LargeArray
                          : MachineFrameInfo::SSPLK_SmallArray));
          ORE.emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorBuffer", &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to a stack allocated buffer or struct containing a "
                      "buffer";
          });
          NeedsProtector = true;
          continue;
        }

        if (Strong &&
            HasAddressTaken(
                AI, M->getDataLayout().getTypeAllocSize(AI->getAllocatedType()),
                M, VisitedPHIs)) {
          ++NumAddrTaken;
          if (!Layout)
            return true;
          Layout->insert(std::make_pair(AI, MachineFrameInfo::SSPLK_AddrOf));
          ORE.emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorAddressTaken",
                                      &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to the address of a local variable being taken";
          });
          NeedsProtector = true;
        }
        // Clear any PHIs that we visited, to make sure we examine all uses of
        // any subsequent allocas that we look at.
        VisitedPHIs.clear();
      }
    }
  }

  return NeedsProtector;
}
/// Create a stack guard loading and populate whether SelectionDAG SSP is
/// supported.
static Value *getStackGuard(const TargetLoweringBase *TLI, Module *M,
                            IRBuilder<> &B,
                            bool *SupportsSelectionDAGSP = nullptr) {
  Value *Guard = TLI->getIRStackGuard(B);
  StringRef GuardMode = M->getStackProtectorGuard();
  if ((GuardMode == "tls" || GuardMode.empty()) && Guard)
    return B.CreateLoad(B.getInt8PtrTy(), Guard, true, "StackGuard");

  // Use SelectionDAG SSP handling, since there isn't an IR guard.
  //
  // This is somewhat unusual, since we optionally report here whether a
  // SelectionDAG stack protector should be used. The reason is that the answer
  // is strictly defined as !TLI->getIRStackGuard(B), and getIRStackGuard is
  // also mutating: there is no way to obtain this bit without mutating the IR,
  // so it has to be computed at exactly this point.
  //
  // We could have defined a new function TLI::supportsSelectionDAGSP(), but
  // that would put more burden on the backends' overriding work, especially
  // when it conveys the same information getIRStackGuard() already gives.
  if (SupportsSelectionDAGSP)
    *SupportsSelectionDAGSP = true;
  TLI->insertSSPDeclarations(*M);
  return B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackguard));
}
/// Insert code into the entry block that stores the stack guard
/// variable onto the stack:
///
///   entry:
///     StackGuardSlot = alloca i8*
///     StackGuard = <stack guard>
///     call void @llvm.stackprotector(StackGuard, StackGuardSlot)
///
/// Returns true if the platform/triple supports the stackprotectorcreate pseudo
/// node.
static bool CreatePrologue(Function *F, Module *M, Instruction *CheckLoc,
                           const TargetLoweringBase *TLI, AllocaInst *&AI) {
  bool SupportsSelectionDAGSP = false;
  IRBuilder<> B(&F->getEntryBlock().front());
  PointerType *PtrTy = Type::getInt8PtrTy(CheckLoc->getContext());
  AI = B.CreateAlloca(PtrTy, nullptr, "StackGuardSlot");

  Value *GuardSlot = getStackGuard(TLI, M, B, &SupportsSelectionDAGSP);
  B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackprotector),
               {GuardSlot, AI});
  return SupportsSelectionDAGSP;
}
/// InsertStackProtectors - Insert code into the prologue and epilogue of the
/// function.
///
///  - The prologue code loads and stores the stack guard onto the stack.
///  - The epilogue checks the value stored in the prologue against the original
///    value. It calls __stack_chk_fail if they differ.
bool StackProtector::InsertStackProtectors() {
  // If the target wants to XOR the frame pointer into the guard value, it's
  // impossible to emit the check in IR, so the target *must* support stack
  // protection in SDAG.
  bool SupportsSelectionDAGSP =
      TLI->useStackGuardXorFP() ||
      (EnableSelectionDAGSP && !TM->Options.EnableFastISel);
  AllocaInst *AI = nullptr; // Place on stack that stores the stack guard.
  BasicBlock *FailBB = nullptr;

  for (BasicBlock &BB : llvm::make_early_inc_range(*F)) {
    // This is the stack protector's auto-generated check BB; skip it.
    if (&BB == FailBB)
      continue;
    Instruction *CheckLoc = dyn_cast<ReturnInst>(BB.getTerminator());
    if (!CheckLoc && !DisableCheckNoReturn)
      for (auto &Inst : BB)
        if (auto *CB = dyn_cast<CallBase>(&Inst))
          // Do stack check before noreturn calls that aren't nounwind (e.g:
          // __cxa_throw).
          if (CB->doesNotReturn() && !CB->doesNotThrow()) {
            CheckLoc = CB;
            break;
          }

    if (!CheckLoc)
      continue;

    // Generate prologue instrumentation if not already generated.
    if (!HasPrologue) {
      HasPrologue = true;
      SupportsSelectionDAGSP &= CreatePrologue(F, M, CheckLoc, TLI, AI);
    }

    // SelectionDAG based code generation. Nothing else needs to be done here.
    // The epilogue instrumentation is postponed to SelectionDAG.
    if (SupportsSelectionDAGSP)
      break;

    // Find the stack guard slot if the prologue was not created by this pass
    // itself via a previous call to CreatePrologue().
    if (!AI) {
      const CallInst *SPCall = findStackProtectorIntrinsic(*F);
      assert(SPCall && "Call to llvm.stackprotector is missing");
      AI = cast<AllocaInst>(SPCall->getArgOperand(1));
    }

    // Set HasIRCheck to true, so that SelectionDAG will not generate its own
    // version. SelectionDAG calls 'shouldEmitSDCheck' to check whether
    // instrumentation has already been generated.
    HasIRCheck = true;

    // If we're instrumenting a block with a tail call, the check has to be
    // inserted before the call rather than between it and the return. The
    // verifier guarantees that a tail call is either directly before the
    // return or with a single correct bitcast of the return value in between so
    // we don't need to worry about many situations here.
    Instruction *Prev = CheckLoc->getPrevNonDebugInstruction();
    if (Prev && isa<CallInst>(Prev) && cast<CallInst>(Prev)->isTailCall())
      CheckLoc = Prev;
    else if (Prev) {
      Prev = Prev->getPrevNonDebugInstruction();
      if (Prev && isa<CallInst>(Prev) && cast<CallInst>(Prev)->isTailCall())
        CheckLoc = Prev;
    }

    // Generate epilogue instrumentation. The epilogue instrumentation can be
    // function-based or inlined depending on which mechanism the target is
    // providing.
    if (Function *GuardCheck = TLI->getSSPStackGuardCheck(*M)) {
      // Generate the function-based epilogue instrumentation.
      // The target provides a guard check function, generate a call to it.
      IRBuilder<> B(CheckLoc);
      LoadInst *Guard = B.CreateLoad(B.getInt8PtrTy(), AI, true, "Guard");
      CallInst *Call = B.CreateCall(GuardCheck, {Guard});
      Call->setAttributes(GuardCheck->getAttributes());
      Call->setCallingConv(GuardCheck->getCallingConv());
    } else {
      // Generate the epilogue with inline instrumentation.
      // If we do not support SelectionDAG based calls, generate IR level
      // calls.
      //
      // For each block with a return instruction, convert this:
      //
      //   return:
      //     ...
      //     ret ...
      //
      // into this:
      //
      //   return:
      //     ...
      //     %1 = <stack guard>
      //     %2 = load StackGuardSlot
      //     %3 = icmp ne i1 %1, %2
      //     br i1 %3, label %CallStackCheckFailBlk, label %SP_return
      //
      //   SP_return:
      //     ret ...
      //
      //   CallStackCheckFailBlk:
      //     call void @__stack_chk_fail()
      //     unreachable
      //
      // Create the FailBB. We duplicate the BB every time since the MI tail
      // merge pass will merge together all of the various BB into one including
      // fail BB generated by the stack protector pseudo instruction.
      if (!FailBB)
        FailBB = CreateFailBB();

      IRBuilder<> B(CheckLoc);
      Value *Guard = getStackGuard(TLI, M, B);
      LoadInst *LI2 = B.CreateLoad(B.getInt8PtrTy(), AI, true);
      auto *Cmp = cast<ICmpInst>(B.CreateICmpNE(Guard, LI2));
      auto SuccessProb =
          BranchProbabilityInfo::getBranchProbStackProtector(true);
      auto FailureProb =
          BranchProbabilityInfo::getBranchProbStackProtector(false);
      MDNode *Weights = MDBuilder(F->getContext())
                            .createBranchWeights(FailureProb.getNumerator(),
                                                 SuccessProb.getNumerator());

      SplitBlockAndInsertIfThen(Cmp, CheckLoc,
                                /*Unreachable=*/false, Weights,
                                DTU ? &*DTU : nullptr,
                                /*LI=*/nullptr, /*ThenBlock=*/FailBB);

      auto *BI = cast<BranchInst>(Cmp->getParent()->getTerminator());
      BasicBlock *NewBB = BI->getSuccessor(1);
      NewBB->setName("SP_return");
      NewBB->moveAfter(&BB);

      // Invert the compare and swap the branch successors, so that the check
      // continues to SP_return when the guard matches and branches to the
      // fail block only on a mismatch.
      Cmp->setPredicate(Cmp->getInversePredicate());
      BI->swapSuccessors();
    }
  }

  // Return whether we added any instrumentation. If we did not, there were no
  // return statements in the function.
  return HasPrologue;
}
/// CreateFailBB - Create a basic block to jump to when the stack protector
/// check fails.
BasicBlock *StackProtector::CreateFailBB() {
  LLVMContext &Context = F->getContext();
  BasicBlock *FailBB = BasicBlock::Create(Context, "CallStackCheckFailBlk", F);
  IRBuilder<> B(FailBB);
  if (F->getSubprogram())
    B.SetCurrentDebugLocation(
        DILocation::get(Context, 0, 0, F->getSubprogram()));
  FunctionCallee StackChkFail;
  SmallVector<Value *, 1> Args;
  if (Trip.isOSOpenBSD()) {
    StackChkFail = M->getOrInsertFunction("__stack_smash_handler",
                                          Type::getVoidTy(Context),
                                          Type::getInt8PtrTy(Context));
    Args.push_back(B.CreateGlobalStringPtr(F->getName(), "SSH"));
  } else {
    StackChkFail =
        M->getOrInsertFunction("__stack_chk_fail", Type::getVoidTy(Context));
  }
  cast<Function>(StackChkFail.getCallee())->addFnAttr(Attribute::NoReturn);
  B.CreateCall(StackChkFail, Args);
  B.CreateUnreachable();
  return FailBB;
}
bool StackProtector::shouldEmitSDCheck(const BasicBlock &BB) const {
  return HasPrologue && !HasIRCheck && isa<ReturnInst>(BB.getTerminator());
}
void StackProtector::copyToMachineFrameInfo(MachineFrameInfo &MFI) const {
  if (Layout.empty())
    return;

  for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
    if (MFI.isDeadObjectIndex(I))
      continue;

    const AllocaInst *AI = MFI.getObjectAllocation(I);
    if (!AI)
      continue;

    SSPLayoutMap::const_iterator LI = Layout.find(AI);
    if (LI == Layout.end())
      continue;

    MFI.setObjectSSPLayout(I, LI->second);
  }
}