//===- AArch64StackTagging.cpp - Stack tagging in IR --===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//

11 #include "AArch64Subtarget.h"
12 #include "llvm/ADT/APInt.h"
13 #include "llvm/ADT/MapVector.h"
14 #include "llvm/ADT/SmallVector.h"
15 #include "llvm/ADT/Statistic.h"
16 #include "llvm/Analysis/AliasAnalysis.h"
17 #include "llvm/Analysis/CFG.h"
18 #include "llvm/Analysis/LoopInfo.h"
19 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
20 #include "llvm/Analysis/PostDominators.h"
21 #include "llvm/Analysis/ScalarEvolution.h"
22 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
23 #include "llvm/Analysis/StackSafetyAnalysis.h"
24 #include "llvm/BinaryFormat/Dwarf.h"
25 #include "llvm/CodeGen/MachineBasicBlock.h"
26 #include "llvm/CodeGen/MachineFunction.h"
27 #include "llvm/CodeGen/MachineInstr.h"
28 #include "llvm/CodeGen/MachineOperand.h"
29 #include "llvm/IR/DebugLoc.h"
30 #include "llvm/IR/Dominators.h"
31 #include "llvm/IR/Function.h"
32 #include "llvm/IR/IRBuilder.h"
33 #include "llvm/IR/InstIterator.h"
34 #include "llvm/IR/Instruction.h"
35 #include "llvm/IR/Instructions.h"
36 #include "llvm/IR/IntrinsicInst.h"
37 #include "llvm/IR/IntrinsicsAArch64.h"
38 #include "llvm/IR/Metadata.h"
39 #include "llvm/IR/PassManager.h"
40 #include "llvm/InitializePasses.h"
41 #include "llvm/Pass.h"
42 #include "llvm/Support/Casting.h"
43 #include "llvm/Support/Debug.h"
44 #include "llvm/Support/raw_ostream.h"
45 #include "llvm/Transforms/Utils/Local.h"
46 #include "llvm/Transforms/Utils/MemoryTaggingSupport.h"

using namespace llvm;

#define DEBUG_TYPE "aarch64-stack-tagging"

static cl::opt<bool> ClMergeInit(
    "stack-tagging-merge-init", cl::Hidden, cl::init(true),
    cl::desc("merge stack variable initializers with tagging when possible"));

static cl::opt<bool>
    ClUseStackSafety("stack-tagging-use-stack-safety", cl::Hidden,
                     cl::init(true),
                     cl::desc("Use Stack Safety analysis results"));

static cl::opt<unsigned> ClScanLimit("stack-tagging-merge-init-scan-limit",
                                     cl::init(40), cl::Hidden);

static cl::opt<unsigned>
    ClMergeInitSizeLimit("stack-tagging-merge-init-size-limit", cl::init(272),
                         cl::Hidden);

static cl::opt<size_t> ClMaxLifetimes(
    "stack-tagging-max-lifetimes-for-alloca", cl::Hidden, cl::init(3),
    cl::desc("How many lifetime ends to handle for a single alloca."));

// Mode for selecting how to insert frame record info into the stack ring
// buffer.
enum StackTaggingRecordStackHistoryMode {
  // Do not record frame record info.
  none,

  // Insert instructions into the prologue for storing into the stack ring
  // buffer directly.
  instr,
};

static cl::opt<StackTaggingRecordStackHistoryMode> ClRecordStackHistory(
    "stack-tagging-record-stack-history",
    cl::desc("Record stack frames with tagged allocations in a thread-local "
             "ring buffer"),
    cl::values(clEnumVal(none, "Do not record stack ring history"),
               clEnumVal(instr, "Insert instructions into the prologue for "
                                "storing into the stack ring buffer")),
    cl::Hidden, cl::init(none));

static const Align kTagGranuleSize = Align(16);

namespace {
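
// InitializerBuilder merges constant stores and memsets that initialize an
// alloca into the tagging sequence itself: initializers are accumulated into
// 8-byte words (Out[]) and later emitted as STGP pairs, with settag and
// settag_zero covering the undef and zero gaps.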
class InitializerBuilder {
  uint64_t Size;
  const DataLayout *DL;
  Value *BasePtr;
  Function *SetTagFn;
  Function *SetTagZeroFn;
  Function *StgpFn;

  // List of initializers sorted by start offset.
  struct Range {
    uint64_t Start, End;
    Instruction *Inst;
  };
  SmallVector<Range, 4> Ranges;
  // 8-aligned offset => 8-byte initializer
  // Missing keys are zero initialized.
  std::map<uint64_t, Value *> Out;

public:
  InitializerBuilder(uint64_t Size, const DataLayout *DL, Value *BasePtr,
                     Function *SetTagFn, Function *SetTagZeroFn,
                     Function *StgpFn)
      : Size(Size), DL(DL), BasePtr(BasePtr), SetTagFn(SetTagFn),
        SetTagZeroFn(SetTagZeroFn), StgpFn(StgpFn) {}

  bool addRange(uint64_t Start, uint64_t End, Instruction *Inst) {
    auto I =
        llvm::lower_bound(Ranges, Start, [](const Range &LHS, uint64_t RHS) {
          return LHS.End <= RHS;
        });
    if (I != Ranges.end() && End > I->Start) {
      // Overlap with an existing initializer: reject the new range.
      return false;
    }
    Ranges.insert(I, {Start, End, Inst});
    return true;
  }

  bool addStore(uint64_t Offset, StoreInst *SI, const DataLayout *DL) {
    int64_t StoreSize = DL->getTypeStoreSize(SI->getOperand(0)->getType());
    if (!addRange(Offset, Offset + StoreSize, SI))
      return false;
    IRBuilder<> IRB(SI);
    applyStore(IRB, Offset, Offset + StoreSize, SI->getOperand(0));
    return true;
  }

  bool addMemSet(uint64_t Offset, MemSetInst *MSI) {
    uint64_t StoreSize = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    if (!addRange(Offset, Offset + StoreSize, MSI))
      return false;
    IRBuilder<> IRB(MSI);
    applyMemSet(IRB, Offset, Offset + StoreSize,
                cast<ConstantInt>(MSI->getValue()));
    return true;
  }

  void applyMemSet(IRBuilder<> &IRB, int64_t Start, int64_t End,
                   ConstantInt *V) {
    // Out[] does not distinguish between zero and undef, and we already know
    // that this memset does not overlap with any other initializer. Nothing to
    // do for a zero memset.
    if (V->isZero())
      return;
    for (int64_t Offset = Start - Start % 8; Offset < End; Offset += 8) {
      uint64_t Cst = 0x0101010101010101UL;
      int LowBits = Offset < Start ? (Start - Offset) * 8 : 0;
      if (LowBits)
        Cst = (Cst >> LowBits) << LowBits;
      int HighBits = End - Offset < 8 ? (8 - (End - Offset)) * 8 : 0;
      if (HighBits)
        Cst = (Cst << HighBits) >> HighBits;
      // Broadcast the memset byte into every covered byte of the 8-byte word.
      ConstantInt *C =
          ConstantInt::get(IRB.getInt64Ty(), Cst * V->getZExtValue());

      Value *&CurrentV = Out[Offset];
      if (!CurrentV) {
        CurrentV = C;
      } else {
        CurrentV = IRB.CreateOr(CurrentV, C);
      }
    }
  }

  // Take a 64-bit slice of the value starting at the given offset (in bytes).
  // Offset can be negative. Pad with zeroes on both sides when necessary.
  Value *sliceValue(IRBuilder<> &IRB, Value *V, int64_t Offset) {
    if (Offset > 0) {
      V = IRB.CreateLShr(V, Offset * 8);
      V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
    } else if (Offset < 0) {
      V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
      V = IRB.CreateShl(V, -Offset * 8);
    } else {
      V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
    }
    return V;
  }

  void applyStore(IRBuilder<> &IRB, int64_t Start, int64_t End,
                  Value *StoredValue) {
    StoredValue = flatten(IRB, StoredValue);
    for (int64_t Offset = Start - Start % 8; Offset < End; Offset += 8) {
      Value *V = sliceValue(IRB, StoredValue, Offset - Start);
      Value *&CurrentV = Out[Offset];
      if (!CurrentV) {
        CurrentV = V;
      } else {
        CurrentV = IRB.CreateOr(CurrentV, V);
      }
    }
  }
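
  // Emit the merged initializer: walk Out[] sixteen bytes at a time, lowering
  // each pair of 8-byte words to STGP and covering the remaining zero/undef
  // gaps with settag_zero / settag, then erase the original stores/memsets.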
  void generate(IRBuilder<> &IRB) {
    LLVM_DEBUG(dbgs() << "Combined initializer\n");
    // No initializers => the entire allocation is undef.
    if (Ranges.empty()) {
      emitUndef(IRB, 0, Size);
      return;
    }

    // Look through 8-byte initializer list 16 bytes at a time;
    // If one of the two 8-byte halves is non-zero non-undef, emit STGP.
    // Otherwise, emit zeroes up to next available item.
    uint64_t LastOffset = 0;
    for (uint64_t Offset = 0; Offset < Size; Offset += 16) {
      auto I1 = Out.find(Offset);
      auto I2 = Out.find(Offset + 8);
      if (I1 == Out.end() && I2 == Out.end())
        continue;

      if (Offset > LastOffset)
        emitZeroes(IRB, LastOffset, Offset - LastOffset);

      Value *Store1 = I1 == Out.end() ? Constant::getNullValue(IRB.getInt64Ty())
                                      : I1->second;
      Value *Store2 = I2 == Out.end() ? Constant::getNullValue(IRB.getInt64Ty())
                                      : I2->second;
      emitPair(IRB, Offset, Store1, Store2);
      LastOffset = Offset + 16;
    }

    // memset(0) does not update Out[], therefore the tail can be either undef
    // or zeroed.
    if (LastOffset < Size)
      emitZeroes(IRB, LastOffset, Size - LastOffset);

    for (const auto &R : Ranges) {
      R.Inst->eraseFromParent();
    }
  }

  void emitZeroes(IRBuilder<> &IRB, uint64_t Offset, uint64_t Size) {
    LLVM_DEBUG(dbgs() << "  [" << Offset << ", " << Offset + Size
                      << ") zero\n");
    Value *Ptr = BasePtr;
    if (Offset)
      Ptr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), Ptr, Offset);
    IRB.CreateCall(SetTagZeroFn,
                   {Ptr, ConstantInt::get(IRB.getInt64Ty(), Size)});
  }

  void emitUndef(IRBuilder<> &IRB, uint64_t Offset, uint64_t Size) {
    LLVM_DEBUG(dbgs() << "  [" << Offset << ", " << Offset + Size
                      << ") undef\n");
    Value *Ptr = BasePtr;
    if (Offset)
      Ptr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), Ptr, Offset);
    IRB.CreateCall(SetTagFn, {Ptr, ConstantInt::get(IRB.getInt64Ty(), Size)});
  }

  void emitPair(IRBuilder<> &IRB, uint64_t Offset, Value *A, Value *B) {
    LLVM_DEBUG(dbgs() << "  [" << Offset << ", " << Offset + 16 << "):\n");
    LLVM_DEBUG(dbgs() << "  " << *A << "\n  " << *B << "\n");
    Value *Ptr = BasePtr;
    if (Offset)
      Ptr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), Ptr, Offset);
    IRB.CreateCall(StgpFn, {Ptr, A, B});
  }
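
  // Canonicalize a stored value to an integer of its store size so that it
  // can be sliced into 64-bit words.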
  Value *flatten(IRBuilder<> &IRB, Value *V) {
    if (V->getType()->isIntegerTy())
      return V;
    // vector of pointers -> vector of ints
    if (VectorType *VecTy = dyn_cast<VectorType>(V->getType())) {
      LLVMContext &Ctx = IRB.getContext();
      Type *EltTy = VecTy->getElementType();
      if (EltTy->isPointerTy()) {
        uint32_t EltSize = DL->getTypeSizeInBits(EltTy);
        auto *NewTy = FixedVectorType::get(
            IntegerType::get(Ctx, EltSize),
            cast<FixedVectorType>(VecTy)->getNumElements());
        V = IRB.CreatePointerCast(V, NewTy);
      }
    }
    return IRB.CreateBitOrPointerCast(
        V, IRB.getIntNTy(DL->getTypeStoreSize(V->getType()) * 8));
  }
};

class AArch64StackTagging : public FunctionPass {
  const bool MergeInit;
  const bool UseStackSafety;

public:
  static char ID; // Pass ID, replacement for typeid

  AArch64StackTagging(bool IsOptNone = false)
      : FunctionPass(ID),
        MergeInit(ClMergeInit.getNumOccurrences() ? ClMergeInit : !IsOptNone),
        UseStackSafety(ClUseStackSafety.getNumOccurrences() ? ClUseStackSafety
                                                            : !IsOptNone) {
    initializeAArch64StackTaggingPass(*PassRegistry::getPassRegistry());
  }

  void tagAlloca(AllocaInst *AI, Instruction *InsertBefore, Value *Ptr,
                 uint64_t Size);
  void untagAlloca(AllocaInst *AI, Instruction *InsertBefore, uint64_t Size);

  Instruction *collectInitializers(Instruction *StartInst, Value *StartPtr,
                                   uint64_t Size, InitializerBuilder &IB);

  Instruction *insertBaseTaggedPointer(
      const Module &M,
      const MapVector<AllocaInst *, memtag::AllocaInfo> &Allocas,
      const DominatorTree *DT);
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AArch64 Stack Tagging"; }

private:
  Function *F = nullptr;
  Function *SetTagFunc = nullptr;
  const DataLayout *DL = nullptr;
  AAResults *AA = nullptr;
  const StackSafetyGlobalInfo *SSI = nullptr;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    if (UseStackSafety)
      AU.addRequired<StackSafetyGlobalInfoWrapperPass>();
    if (MergeInit)
      AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
  }
};

} // end anonymous namespace

char AArch64StackTagging::ID = 0;

INITIALIZE_PASS_BEGIN(AArch64StackTagging, DEBUG_TYPE, "AArch64 Stack Tagging",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(StackSafetyGlobalInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
INITIALIZE_PASS_END(AArch64StackTagging, DEBUG_TYPE, "AArch64 Stack Tagging",
                    false, false)

FunctionPass *llvm::createAArch64StackTaggingPass(bool IsOptNone) {
  return new AArch64StackTagging(IsOptNone);
}
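
// Scan forward from StartInst for simple stores and constant memsets that
// write at a constant offset into the alloca, feeding each one into the
// InitializerBuilder. Returns the last instruction that was folded so the
// caller can place the tagging sequence after it.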
Instruction *AArch64StackTagging::collectInitializers(Instruction *StartInst,
                                                      Value *StartPtr,
                                                      uint64_t Size,
                                                      InitializerBuilder &IB) {
  MemoryLocation AllocaLoc{StartPtr, Size};
  Instruction *LastInst = StartInst;
  BasicBlock::iterator BI(StartInst);

  unsigned Count = 0;
  for (; Count < ClScanLimit && !BI->isTerminator(); ++BI) {
    if (!isa<DbgInfoIntrinsic>(*BI))
      ++Count;

    if (isNoModRef(AA->getModRefInfo(&*BI, AllocaLoc)))
      continue;

    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out. We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      if (!NextStore->isSimple())
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      std::optional<int64_t> Offset =
          NextStore->getPointerOperand()->getPointerOffsetFrom(StartPtr, *DL);
      if (!Offset)
        break;

      if (!IB.addStore(*Offset, NextStore, DL))
        break;
      LastInst = NextStore;
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
        break;

      if (!isa<ConstantInt>(MSI->getValue()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      std::optional<int64_t> Offset =
          MSI->getDest()->getPointerOffsetFrom(StartPtr, *DL);
      if (!Offset)
        break;

      if (!IB.addMemSet(*Offset, MSI))
        break;
      LastInst = MSI;
    }
  }
  return LastInst;
}
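
// Tag one alloca: emit settag / settag_zero / stgp covering [Ptr, Ptr + Size),
// folding in the alloca's constant initializers when merging is enabled.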
void AArch64StackTagging::tagAlloca(AllocaInst *AI, Instruction *InsertBefore,
                                    Value *Ptr, uint64_t Size) {
  auto SetTagZeroFunc = Intrinsic::getOrInsertDeclaration(
      F->getParent(), Intrinsic::aarch64_settag_zero);
  auto StgpFunc = Intrinsic::getOrInsertDeclaration(F->getParent(),
                                                    Intrinsic::aarch64_stgp);

  InitializerBuilder IB(Size, DL, Ptr, SetTagFunc, SetTagZeroFunc, StgpFunc);
  bool LittleEndian =
      Triple(AI->getModule()->getTargetTriple()).isLittleEndian();
  // Current implementation of initializer merging assumes little endianness.
  if (MergeInit && !F->hasOptNone() && LittleEndian &&
      Size < ClMergeInitSizeLimit) {
    LLVM_DEBUG(dbgs() << "collecting initializers for " << *AI
                      << ", size = " << Size << "\n");
    InsertBefore = collectInitializers(InsertBefore, Ptr, Size, IB);
  }

  IRBuilder<> IRB(InsertBefore);
  IB.generate(IRB);
}
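
// Reset the tags over the alloca's memory, used at function exits and at
// lifetime ends.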
void AArch64StackTagging::untagAlloca(AllocaInst *AI, Instruction *InsertBefore,
                                      uint64_t Size) {
  IRBuilder<> IRB(InsertBefore);
  IRB.CreateCall(SetTagFunc, {IRB.CreatePointerCast(AI, IRB.getPtrTy()),
                              ConstantInt::get(IRB.getInt64Ty(), Size)});
}
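
// Materialize the per-frame base tagged pointer (IRG of SP) in the block that
// dominates all instrumented allocas and, when requested, record the frame in
// the thread-local stack history ring buffer.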
Instruction *AArch64StackTagging::insertBaseTaggedPointer(
    const Module &M,
    const MapVector<AllocaInst *, memtag::AllocaInfo> &AllocasToInstrument,
    const DominatorTree *DT) {
  BasicBlock *PrologueBB = nullptr;
  // Try sinking IRG as deep as possible to avoid hurting shrink wrap.
  for (auto &I : AllocasToInstrument) {
    const memtag::AllocaInfo &Info = I.second;
    AllocaInst *AI = Info.AI;
    if (!PrologueBB) {
      PrologueBB = AI->getParent();
      continue;
    }
    PrologueBB = DT->findNearestCommonDominator(PrologueBB, AI->getParent());
  }
  assert(PrologueBB);

  IRBuilder<> IRB(&PrologueBB->front());
  Instruction *Base =
      IRB.CreateIntrinsic(Intrinsic::aarch64_irg_sp, {},
                          {Constant::getNullValue(IRB.getInt64Ty())});
  Base->setName("basetag");
  auto TargetTriple = Triple(M.getTargetTriple());
  // This ABI will make it into Android API level 35.
  // The ThreadLong format is the same as with HWASan, but the entries for
  // stack MTE take two slots (16 bytes).
  if (ClRecordStackHistory == instr && TargetTriple.isAndroid() &&
      TargetTriple.isAArch64() && !TargetTriple.isAndroidVersionLT(35) &&
      !AllocasToInstrument.empty()) {
    constexpr int StackMteSlot = -3;
    constexpr uint64_t TagMask = 0xFULL << 56;

    auto *IntptrTy = IRB.getIntPtrTy(M.getDataLayout());
    Value *SlotPtr = memtag::getAndroidSlotPtr(IRB, StackMteSlot);
    auto *ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);
    Value *FP = memtag::getFP(IRB);
    Value *Tag = IRB.CreateAnd(IRB.CreatePtrToInt(Base, IntptrTy), TagMask);
    Value *TaggedFP = IRB.CreateOr(FP, Tag);
    Value *PC = memtag::getPC(TargetTriple, IRB);
    Value *RecordPtr = IRB.CreateIntToPtr(ThreadLong, IRB.getPtrTy(0));
    IRB.CreateStore(PC, RecordPtr);
    IRB.CreateStore(TaggedFP, IRB.CreateConstGEP1_64(IntptrTy, RecordPtr, 1));

    IRB.CreateStore(memtag::incrementThreadLong(IRB, ThreadLong, 16), SlotPtr);
  }
  return Base;
}
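
// Entry point: instrument every alloca selected by StackInfoBuilder in
// functions that carry the sanitize_memtag attribute.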
// FIXME: check for MTE extension
bool AArch64StackTagging::runOnFunction(Function &Fn) {
  if (!Fn.hasFnAttribute(Attribute::SanitizeMemTag))
    return false;

  if (UseStackSafety)
    SSI = &getAnalysis<StackSafetyGlobalInfoWrapperPass>().getResult();
  F = &Fn;
  DL = &Fn.getDataLayout();
  if (MergeInit)
    AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  OptimizationRemarkEmitter &ORE =
      getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();

  memtag::StackInfoBuilder SIB(SSI, DEBUG_TYPE);
  for (Instruction &I : instructions(F))
    SIB.visit(ORE, I);
  memtag::StackInfo &SInfo = SIB.get();

  if (SInfo.AllocasToInstrument.empty())
    return false;

  std::unique_ptr<DominatorTree> DeleteDT;
  DominatorTree *DT = nullptr;
  if (auto *P = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
    DT = &P->getDomTree();
  if (DT == nullptr) {
    DeleteDT = std::make_unique<DominatorTree>(*F);
    DT = DeleteDT.get();
  }

  std::unique_ptr<PostDominatorTree> DeletePDT;
  PostDominatorTree *PDT = nullptr;
  if (auto *P = getAnalysisIfAvailable<PostDominatorTreeWrapperPass>())
    PDT = &P->getPostDomTree();

  if (PDT == nullptr) {
    DeletePDT = std::make_unique<PostDominatorTree>(*F);
    PDT = DeletePDT.get();
  }

  std::unique_ptr<LoopInfo> DeleteLI;
  LoopInfo *LI = nullptr;
  if (auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>()) {
    LI = &LIWP->getLoopInfo();
  } else {
    DeleteLI = std::make_unique<LoopInfo>(*DT);
    LI = DeleteLI.get();
  }

  SetTagFunc = Intrinsic::getOrInsertDeclaration(F->getParent(),
                                                 Intrinsic::aarch64_settag);

  Instruction *Base =
      insertBaseTaggedPointer(*Fn.getParent(), SInfo.AllocasToInstrument, DT);

  unsigned int NextTag = 0;
  for (auto &I : SInfo.AllocasToInstrument) {
    memtag::AllocaInfo &Info = I.second;
    assert(Info.AI && SIB.getAllocaInterestingness(*Info.AI) ==
                          llvm::memtag::AllocaInterestingness::kInteresting);
    memtag::alignAndPadAlloca(Info, kTagGranuleSize);
    AllocaInst *AI = Info.AI;
    unsigned int Tag = NextTag;
    NextTag = (NextTag + 1) % 16;
    // Replace alloca with tagp(alloca).
    IRBuilder<> IRB(Info.AI->getNextNode());
    Instruction *TagPCall =
        IRB.CreateIntrinsic(Intrinsic::aarch64_tagp, {Info.AI->getType()},
                            {Constant::getNullValue(Info.AI->getType()), Base,
                             ConstantInt::get(IRB.getInt64Ty(), Tag)});
    if (Info.AI->hasName())
      TagPCall->setName(Info.AI->getName() + ".tag");
    // Does not replace metadata, so we don't have to handle DbgVariableRecords.
    Info.AI->replaceUsesWithIf(TagPCall, [&](const Use &U) {
      return !memtag::isLifetimeIntrinsic(U.getUser());
    });
    TagPCall->setOperand(0, Info.AI);

    // Calls to functions that may return twice (e.g. setjmp) confuse the
    // postdominator analysis, and will leave us to keep memory tagged after
    // function return. Work around this by always untagging at every return
    // statement if return_twice functions are called.
    bool StandardLifetime =
        !SInfo.CallsReturnTwice && SInfo.UnrecognizedLifetimes.empty() &&
        memtag::isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd, DT, LI,
                                   ClMaxLifetimes);
    if (StandardLifetime) {
      IntrinsicInst *Start = Info.LifetimeStart[0];
      uint64_t Size =
          cast<ConstantInt>(Start->getArgOperand(0))->getZExtValue();
      Size = alignTo(Size, kTagGranuleSize);
      tagAlloca(AI, Start->getNextNode(), TagPCall, Size);

      auto TagEnd = [&](Instruction *Node) { untagAlloca(AI, Node, Size); };
      if (!DT || !PDT ||
          !memtag::forAllReachableExits(*DT, *PDT, *LI, Start, Info.LifetimeEnd,
                                        SInfo.RetVec, TagEnd)) {
        for (auto *End : Info.LifetimeEnd)
          End->eraseFromParent();
      }
    } else {
      uint64_t Size = *Info.AI->getAllocationSize(*DL);
      Value *Ptr = IRB.CreatePointerCast(TagPCall, IRB.getPtrTy());
      tagAlloca(AI, &*IRB.GetInsertPoint(), Ptr, Size);
      for (auto *RI : SInfo.RetVec) {
        untagAlloca(AI, RI, Size);
      }
      // We may have inserted tag/untag outside of any lifetime interval.
      // Remove all lifetime intrinsics for this alloca.
      for (auto *II : Info.LifetimeStart)
        II->eraseFromParent();
      for (auto *II : Info.LifetimeEnd)
        II->eraseFromParent();
    }

    memtag::annotateDebugRecords(Info, Tag);
  }

  // If we have instrumented at least one alloca, all unrecognized lifetime
  // intrinsics have to go.
  for (auto *I : SInfo.UnrecognizedLifetimes)
    I->eraseFromParent();

  return true;
}