//===- StackProtector.cpp - Stack Protector Insertion --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass inserts stack protectors into functions which need them. A variable
// with a random value in it is stored onto the stack before the local variables
// are allocated. Upon exiting the block, the stored value is checked. If it's
// changed, then there was some sort of violation and the program aborts.
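//
// As an illustrative sketch (the helper name read_input is hypothetical), a
// function such as
//
//   void f() { char buf[16]; read_input(buf); }
//
// gets a guard slot written in the prologue and compared against the original
// guard value before each return; on a mismatch, control branches to a block
// that calls __stack_chk_fail().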
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "stack-protector"

STATISTIC(NumFunProtected, "Number of functions protected");
STATISTIC(NumAddrTaken, "Number of local variables that have their address"
                        " taken.");

static cl::opt<bool> EnableSelectionDAGSP("enable-selectiondag-sp",
                                          cl::init(true), cl::Hidden);

char StackProtector::ID = 0;

INITIALIZE_PASS_BEGIN(StackProtector, DEBUG_TYPE,
                      "Insert stack protectors", false, true)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(StackProtector, DEBUG_TYPE,
                    "Insert stack protectors", false, true)

FunctionPass *llvm::createStackProtectorPass() { return new StackProtector(); }

void StackProtector::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<TargetPassConfig>();
  AU.addPreserved<DominatorTreeWrapperPass>();
}

bool StackProtector::runOnFunction(Function &Fn) {
  F = &Fn;
  M = F->getParent();
  DominatorTreeWrapperPass *DTWP =
      getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DT = DTWP ? &DTWP->getDomTree() : nullptr;
  TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
  Trip = TM->getTargetTriple();
  TLI = TM->getSubtargetImpl(Fn)->getTargetLowering();
  HasPrologue = false;
  HasIRCheck = false;

  Attribute Attr = Fn.getFnAttribute("stack-protector-buffer-size");
  if (Attr.isStringAttribute() &&
      Attr.getValueAsString().getAsInteger(10, SSPBufferSize))
    return false; // Invalid integer string

  if (!RequiresStackProtector())
    return false;

  // TODO(etienneb): Functions with funclets are not correctly supported now.
  // Do nothing if this is a funclet-based personality.
  if (Fn.hasPersonalityFn()) {
    EHPersonality Personality = classifyEHPersonality(Fn.getPersonalityFn());
    if (isFuncletEHPersonality(Personality))
      return false;
  }

  ++NumFunProtected;
  return InsertStackProtectors();
}

/// \param [out] IsLarge is set to true if a protectable array is found and
/// it is "large" ( >= ssp-buffer-size). In the case of a structure with
/// multiple arrays, this gets set if any of them is large.
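///
/// Illustrative example (assuming the default ssp-buffer-size of 8): under
/// sspstrong, scanning `struct { char Small[4]; char Big[64]; }` first finds
/// Small, which is protectable but not large, keeps looking, and then finds
/// Big, which sets IsLarge as well.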
bool StackProtector::ContainsProtectableArray(Type *Ty, bool &IsLarge,
                                              bool Strong,
                                              bool InStruct) const {
  if (!Ty)
    return false;
  if (ArrayType *AT = dyn_cast<ArrayType>(Ty)) {
    if (!AT->getElementType()->isIntegerTy(8)) {
      // If we're on a non-Darwin platform or we're inside of a structure, don't
      // add stack protectors unless the array is a character array.
      // However, in strong mode any array, regardless of type and size,
      // triggers a protector.
      if (!Strong && (InStruct || !Trip.isOSDarwin()))
        return false;
    }

    // If an array has more than SSPBufferSize bytes of allocated space, then we
    // emit stack protectors.
    if (SSPBufferSize <= M->getDataLayout().getTypeAllocSize(AT)) {
      IsLarge = true;
      return true;
    }

    if (Strong)
      // Require a protector for all arrays in strong mode.
      return true;
  }

  const StructType *ST = dyn_cast<StructType>(Ty);
  if (!ST)
    return false;

  bool NeedsProtector = false;
  for (StructType::element_iterator I = ST->element_begin(),
                                    E = ST->element_end();
       I != E; ++I)
    if (ContainsProtectableArray(*I, IsLarge, Strong, true)) {
      // If the element is a protectable array and is large (>= SSPBufferSize)
      // then we are done. If the protectable array is not large, then
      // keep looking in case a subsequent element is a large array.
      if (IsLarge)
        return true;
      NeedsProtector = true;
    }

  return NeedsProtector;
}

static bool isLifetimeInst(const Instruction *I) {
  if (const auto Intrinsic = dyn_cast<IntrinsicInst>(I)) {
    const auto Id = Intrinsic->getIntrinsicID();
    return Id == Intrinsic::lifetime_start || Id == Intrinsic::lifetime_end;
  }
  return false;
}

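/// Check whether the address of a stack allocation escapes: stored somewhere,
/// converted to an integer, passed to a non-intrinsic call or invoke, or
/// reachable through a select, PHI, GEP, or bitcast whose result escapes.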
bool StackProtector::HasAddressTaken(const Instruction *AI) {
  for (const User *U : AI->users()) {
    if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (AI == SI->getValueOperand())
        return true;
    } else if (const PtrToIntInst *SI = dyn_cast<PtrToIntInst>(U)) {
      if (AI == SI->getOperand(0))
        return true;
    } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
      // Ignore intrinsics that are not calls. TODO: Use isLoweredToCall().
      if (!isa<DbgInfoIntrinsic>(CI) && !isLifetimeInst(CI))
        return true;
    } else if (isa<InvokeInst>(U)) {
      return true;
    } else if (const SelectInst *SI = dyn_cast<SelectInst>(U)) {
      if (HasAddressTaken(SI))
        return true;
    } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
      // Keep track of what PHI nodes we have already visited to ensure
      // they are only visited once.
      if (VisitedPHIs.insert(PN).second)
        if (HasAddressTaken(PN))
          return true;
    } else if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      if (HasAddressTaken(GEP))
        return true;
    } else if (const BitCastInst *BI = dyn_cast<BitCastInst>(U)) {
      if (HasAddressTaken(BI))
        return true;
    }
  }
  return false;
}

/// Check whether or not this function needs a stack protector based
/// upon the stack protector level.
///
/// We use two heuristics: a standard (ssp) and strong (sspstrong).
/// The standard heuristic will add a guard variable to functions that call
/// alloca with either a variable size or a size >= SSPBufferSize, functions
/// with character buffers larger than SSPBufferSize, and functions with
/// aggregates containing character buffers larger than SSPBufferSize. The
/// strong heuristic will add a guard variable to functions that call alloca
/// regardless of size, functions with any buffer regardless of type and size,
/// functions with aggregates that contain any buffer regardless of type and
/// size, and functions that contain stack-based variables that have had their
/// address taken.
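///
/// For example (illustrative, assuming the default ssp-buffer-size of 8):
/// `char buf[8]` triggers a protector under both heuristics, `char buf[4]`
/// only under sspstrong, and a scalar local only under sspstrong and only
/// when its address is taken.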
bool StackProtector::RequiresStackProtector() {
  bool Strong = false;
  bool NeedsProtector = false;
  for (const BasicBlock &BB : *F)
    for (const Instruction &I : BB)
      if (const CallInst *CI = dyn_cast<CallInst>(&I))
        if (CI->getCalledFunction() ==
            Intrinsic::getDeclaration(F->getParent(),
                                      Intrinsic::stackprotector))
          HasPrologue = true;

  if (F->hasFnAttribute(Attribute::SafeStack))
    return false;

  // We are constructing the OptimizationRemarkEmitter on the fly rather than
  // using the analysis pass to avoid building DominatorTree and LoopInfo which
  // are not available this late in the IR pipeline.
  OptimizationRemarkEmitter ORE(F);

  if (F->hasFnAttribute(Attribute::StackProtectReq)) {
    ORE.emit([&]() {
      return OptimizationRemark(DEBUG_TYPE, "StackProtectorRequested", F)
             << "Stack protection applied to function "
             << ore::NV("Function", F)
             << " due to a function attribute or command-line switch";
    });
    NeedsProtector = true;
    Strong = true; // Use the same heuristic as strong to determine SSPLayout
  } else if (F->hasFnAttribute(Attribute::StackProtectStrong))
    Strong = true;
  else if (HasPrologue)
    NeedsProtector = true;
  else if (!F->hasFnAttribute(Attribute::StackProtect))
    return false;

  for (const BasicBlock &BB : *F) {
    for (const Instruction &I : BB) {
      if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
        if (AI->isArrayAllocation()) {
          auto RemarkBuilder = [&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorAllocaOrArray",
                                      &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to a call to alloca or use of a variable length "
                      "array";
          };
          if (const auto *CI = dyn_cast<ConstantInt>(AI->getArraySize())) {
            if (CI->getLimitedValue(SSPBufferSize) >= SSPBufferSize) {
              // A call to alloca with size >= SSPBufferSize requires
              // stack protectors.
              Layout.insert(
                  std::make_pair(AI, MachineFrameInfo::SSPLK_LargeArray));
              ORE.emit(RemarkBuilder);
              NeedsProtector = true;
            } else if (Strong) {
              // Require protectors for all alloca calls in strong mode.
              Layout.insert(
                  std::make_pair(AI, MachineFrameInfo::SSPLK_SmallArray));
              ORE.emit(RemarkBuilder);
              NeedsProtector = true;
            }
          } else {
            // A call to alloca with a variable size requires protectors.
            Layout.insert(
                std::make_pair(AI, MachineFrameInfo::SSPLK_LargeArray));
            ORE.emit(RemarkBuilder);
            NeedsProtector = true;
          }
          continue;
        }

        bool IsLarge = false;
        if (ContainsProtectableArray(AI->getAllocatedType(), IsLarge, Strong)) {
          Layout.insert(std::make_pair(
              AI, IsLarge ? MachineFrameInfo::SSPLK_LargeArray
                          : MachineFrameInfo::SSPLK_SmallArray));
          ORE.emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorBuffer", &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to a stack allocated buffer or struct containing a "
                      "buffer";
          });
          NeedsProtector = true;
          continue;
        }

        if (Strong && HasAddressTaken(AI)) {
          ++NumAddrTaken;
          Layout.insert(std::make_pair(AI, MachineFrameInfo::SSPLK_AddrOf));
          ORE.emit([&]() {
            return OptimizationRemark(DEBUG_TYPE, "StackProtectorAddressTaken",
                                      &I)
                   << "Stack protection applied to function "
                   << ore::NV("Function", F)
                   << " due to the address of a local variable being taken";
          });
          NeedsProtector = true;
        }
      }
    }
  }

  return NeedsProtector;
}

/// Create a stack guard loading and populate whether SelectionDAG SSP is
/// supported.
static Value *getStackGuard(const TargetLoweringBase *TLI, Module *M,
                            IRBuilder<> &B,
                            bool *SupportsSelectionDAGSP = nullptr) {
  if (Value *Guard = TLI->getIRStackGuard(B))
    return B.CreateLoad(Guard, true, "StackGuard");

  // Use SelectionDAG SSP handling, since there isn't an IR guard.
  //
  // This is more or less weird, since we optionally output whether we
  // should perform a SelectionDAG SP here. The reason is that it's strictly
  // defined as !TLI->getIRStackGuard(B), where getIRStackGuard is also
  // mutating. There is no way to get this bit without mutating the IR, so
  // getting this bit has to happen at exactly the right time.
  //
  // We could have defined a new function TLI::supportsSelectionDAGSP(), but
  // that would put more burden on the backends' overriding work, especially
  // when it actually conveys the same information getIRStackGuard() already
  // gives.
  if (SupportsSelectionDAGSP)
    *SupportsSelectionDAGSP = true;
  TLI->insertSSPDeclarations(*M);
  return B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackguard));
}

/// Insert code into the entry block that stores the stack guard
/// variable onto the stack:
///
///   entry:
///     StackGuardSlot = alloca i8*
///     StackGuard = <stack guard>
///     call void @llvm.stackprotector(StackGuard, StackGuardSlot)
///
/// Returns true if the platform/triple supports the stackprotectorcreate
/// pseudo-node.
static bool CreatePrologue(Function *F, Module *M, ReturnInst *RI,
                           const TargetLoweringBase *TLI, AllocaInst *&AI) {
  bool SupportsSelectionDAGSP = false;
  IRBuilder<> B(&F->getEntryBlock().front());
  PointerType *PtrTy = Type::getInt8PtrTy(RI->getContext());
  AI = B.CreateAlloca(PtrTy, nullptr, "StackGuardSlot");

  Value *GuardSlot = getStackGuard(TLI, M, B, &SupportsSelectionDAGSP);
  B.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::stackprotector),
               {GuardSlot, AI});
  return SupportsSelectionDAGSP;
}

/// InsertStackProtectors - Insert code into the prologue and epilogue of the
/// function.
///
///  - The prologue code loads and stores the stack guard onto the stack.
///  - The epilogue checks the value stored in the prologue against the
///    original value. It calls __stack_chk_fail if they differ.
bool StackProtector::InsertStackProtectors() {
  // If the target wants to XOR the frame pointer into the guard value, it's
  // impossible to emit the check in IR, so the target *must* support stack
  // protection in SDAG.
  bool SupportsSelectionDAGSP =
      TLI->useStackGuardXorFP() ||
      (EnableSelectionDAGSP && !TM->Options.EnableFastISel);
  AllocaInst *AI = nullptr; // Place on stack that stores the stack guard.

  for (Function::iterator I = F->begin(), E = F->end(); I != E;) {
    BasicBlock *BB = &*I++;
    ReturnInst *RI = dyn_cast<ReturnInst>(BB->getTerminator());
    if (!RI)
      continue;

    // Generate prologue instrumentation if not already generated.
    if (!HasPrologue) {
      HasPrologue = true;
      SupportsSelectionDAGSP &= CreatePrologue(F, M, RI, TLI, AI);
    }

    // SelectionDAG based code generation. Nothing else needs to be done here.
    // The epilogue instrumentation is postponed to SelectionDAG.
    if (SupportsSelectionDAGSP)
      break;

    // Set HasIRCheck to true, so that SelectionDAG will not generate its own
    // version. SelectionDAG calls 'shouldEmitSDCheck' to check whether
    // instrumentation has already been generated.
    HasIRCheck = true;

    // Generate epilogue instrumentation. The epilogue instrumentation can be
    // function-based or inlined depending on which mechanism the target is
    // providing.
    if (Value *GuardCheck = TLI->getSSPStackGuardCheck(*M)) {
      // Generate the function-based epilogue instrumentation.
      // The target provides a guard check function, generate a call to it.
      IRBuilder<> B(RI);
      LoadInst *Guard = B.CreateLoad(AI, true, "Guard");
      CallInst *Call = B.CreateCall(GuardCheck, {Guard});
      llvm::Function *Function = cast<llvm::Function>(GuardCheck);
      Call->setAttributes(Function->getAttributes());
      Call->setCallingConv(Function->getCallingConv());
    } else {
      // Generate the epilogue with inline instrumentation.
      // If we do not support SelectionDAG based tail calls, generate IR level
      // tail calls.
      //
      // For each block with a return instruction, convert this:
      //
      //   return:
      //     ...
      //     ret ...
      //
      // into this:
      //
      //   return:
      //     ...
      //     %1 = <stack guard>
      //     %2 = load StackGuardSlot
      //     %3 = cmp i1 %1, %2
      //     br i1 %3, label %SP_return, label %CallStackCheckFailBlk
      //
      //   SP_return:
      //     ret ...
      //
      //   CallStackCheckFailBlk:
      //     call void @__stack_chk_fail()
      //     unreachable

      // Create the FailBB. We duplicate the BB every time since the MI tail
      // merge pass will merge together all of the various BBs into one,
      // including the fail BB generated by the stack protector pseudo
      // instruction.
      BasicBlock *FailBB = CreateFailBB();

      // Split the basic block before the return instruction.
      BasicBlock *NewBB = BB->splitBasicBlock(RI->getIterator(), "SP_return");

      // Update the dominator tree if we need to.
      if (DT && DT->isReachableFromEntry(BB)) {
        DT->addNewBlock(NewBB, BB);
        DT->addNewBlock(FailBB, BB);
      }

      // Remove the default branch instruction to the new BB.
      BB->getTerminator()->eraseFromParent();

      // Move the newly created basic block to the point right after the old
      // basic block so that it's in the "fall through" position.
      NewBB->moveAfter(BB);

      // Generate the stack protector instructions in the old basic block.
      IRBuilder<> B(BB);
      Value *Guard = getStackGuard(TLI, M, B);
      LoadInst *LI2 = B.CreateLoad(AI, true);
      Value *Cmp = B.CreateICmpEQ(Guard, LI2);
      BranchProbability SuccessProb =
          BranchProbabilityInfo::getBranchProbStackProtector(true);
      BranchProbability FailureProb =
          BranchProbabilityInfo::getBranchProbStackProtector(false);
      MDNode *Weights = MDBuilder(F->getContext())
                            .createBranchWeights(SuccessProb.getNumerator(),
                                                 FailureProb.getNumerator());
      B.CreateCondBr(Cmp, NewBB, FailBB, Weights);
    }
  }

  // Return whether we modified any basic blocks, i.e., whether there are any
  // return statements in the function.
  return HasPrologue;
}

/// CreateFailBB - Create a basic block to jump to when the stack protector
/// check fails.
BasicBlock *StackProtector::CreateFailBB() {
  LLVMContext &Context = F->getContext();
  BasicBlock *FailBB = BasicBlock::Create(Context, "CallStackCheckFailBlk", F);
  IRBuilder<> B(FailBB);
  B.SetCurrentDebugLocation(DebugLoc::get(0, 0, F->getSubprogram()));
  if (Trip.isOSOpenBSD()) {
    Constant *StackChkFail =
        M->getOrInsertFunction("__stack_smash_handler",
                               Type::getVoidTy(Context),
                               Type::getInt8PtrTy(Context));

    B.CreateCall(StackChkFail, B.CreateGlobalStringPtr(F->getName(), "SSH"));
  } else {
    Constant *StackChkFail =
        M->getOrInsertFunction("__stack_chk_fail", Type::getVoidTy(Context));

    B.CreateCall(StackChkFail, {});
  }
  B.CreateUnreachable();
  return FailBB;
}

bool StackProtector::shouldEmitSDCheck(const BasicBlock &BB) const {
  return HasPrologue && !HasIRCheck && dyn_cast<ReturnInst>(BB.getTerminator());
}

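/// Copy the SSP layout classification computed by RequiresStackProtector()
/// into the MachineFrameInfo, matching each live frame object back to the
/// alloca it was created from.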
void StackProtector::copyToMachineFrameInfo(MachineFrameInfo &MFI) const {
  if (Layout.empty())
    return;

  for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
    if (MFI.isDeadObjectIndex(I))
      continue;

    const AllocaInst *AI = MFI.getObjectAllocation(I);
    if (!AI)
      continue;

    SSPLayoutMap::const_iterator LI = Layout.find(AI);
    if (LI == Layout.end())
      continue;

    MFI.setObjectSSPLayout(I, LI->second);
  }
}