//===- AArch64StackTagging.cpp - Stack tagging in IR ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
12 #include "AArch64InstrInfo.h"
13 #include "AArch64Subtarget.h"
14 #include "AArch64TargetMachine.h"
15 #include "llvm/ADT/DenseMap.h"
16 #include "llvm/ADT/DepthFirstIterator.h"
17 #include "llvm/ADT/MapVector.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/Statistic.h"
22 #include "llvm/Analysis/LoopInfo.h"
23 #include "llvm/Analysis/ScalarEvolution.h"
24 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
25 #include "llvm/Analysis/ValueTracking.h"
26 #include "llvm/CodeGen/LiveRegUnits.h"
27 #include "llvm/CodeGen/MachineBasicBlock.h"
28 #include "llvm/CodeGen/MachineFunction.h"
29 #include "llvm/CodeGen/MachineFunctionPass.h"
30 #include "llvm/CodeGen/MachineInstr.h"
31 #include "llvm/CodeGen/MachineInstrBuilder.h"
32 #include "llvm/CodeGen/MachineLoopInfo.h"
33 #include "llvm/CodeGen/MachineOperand.h"
34 #include "llvm/CodeGen/MachineRegisterInfo.h"
35 #include "llvm/CodeGen/TargetPassConfig.h"
36 #include "llvm/CodeGen/TargetRegisterInfo.h"
37 #include "llvm/IR/DebugLoc.h"
38 #include "llvm/IR/Dominators.h"
39 #include "llvm/IR/Function.h"
40 #include "llvm/IR/GetElementPtrTypeIterator.h"
41 #include "llvm/IR/Instruction.h"
42 #include "llvm/IR/Instructions.h"
43 #include "llvm/IR/IntrinsicInst.h"
44 #include "llvm/IR/Metadata.h"
45 #include "llvm/Pass.h"
46 #include "llvm/Support/Casting.h"
47 #include "llvm/Support/Debug.h"
48 #include "llvm/Support/raw_ostream.h"
49 #include "llvm/Transforms/Utils/Local.h"

using namespace llvm;

#define DEBUG_TYPE "stack-tagging"

static cl::opt<bool> ClMergeInit(
    "stack-tagging-merge-init", cl::Hidden, cl::init(true), cl::ZeroOrMore,
    cl::desc("merge stack variable initializers with tagging when possible"));

static cl::opt<unsigned> ClScanLimit("stack-tagging-merge-init-scan-limit",
                                     cl::init(40), cl::Hidden);

static constexpr unsigned kTagGranuleSize = 16;

namespace {
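
// Helper that merges adjacent stack-variable initializers (plain stores and
// constant memsets) into the tagging instructions themselves: instead of
// tagging memory and then storing initial values into it, it emits
// aarch64_stgp (store-pair-with-tag), aarch64_settag_zero and aarch64_settag
// calls that do both at once.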
class InitializerBuilder {
  uint64_t Size;
  const DataLayout *DL;
  Value *BasePtr;
  Function *SetTagFn;
  Function *SetTagZeroFn;
  Function *StgpFn;

  // List of initializers sorted by start offset.
  struct Range {
    uint64_t Start, End;
    Instruction *Inst;
  };
  SmallVector<Range, 4> Ranges;
  // 8-aligned offset => 8-byte initializer
  // Missing keys are zero initialized.
  std::map<uint64_t, Value *> Out;

public:
  InitializerBuilder(uint64_t Size, const DataLayout *DL, Value *BasePtr,
                     Function *SetTagFn, Function *SetTagZeroFn,
                     Function *StgpFn)
      : Size(Size), DL(DL), BasePtr(BasePtr), SetTagFn(SetTagFn),
        SetTagZeroFn(SetTagZeroFn), StgpFn(StgpFn) {}

  bool addRange(uint64_t Start, uint64_t End, Instruction *Inst) {
    auto I = std::lower_bound(
        Ranges.begin(), Ranges.end(), Start,
        [](const Range &LHS, uint64_t RHS) { return LHS.End <= RHS; });
    if (I != Ranges.end() && End > I->Start) {
      // Overlap - bail.
      return false;
    }
    Ranges.insert(I, {Start, End, Inst});
    return true;
  }

  bool addStore(uint64_t Offset, StoreInst *SI, const DataLayout *DL) {
    int64_t StoreSize = DL->getTypeStoreSize(SI->getOperand(0)->getType());
    if (!addRange(Offset, Offset + StoreSize, SI))
      return false;
    IRBuilder<> IRB(SI);
    applyStore(IRB, Offset, Offset + StoreSize, SI->getOperand(0));
    return true;
  }

  bool addMemSet(uint64_t Offset, MemSetInst *MSI) {
    uint64_t StoreSize = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    if (!addRange(Offset, Offset + StoreSize, MSI))
      return false;
    IRBuilder<> IRB(MSI);
    applyMemSet(IRB, Offset, Offset + StoreSize,
                cast<ConstantInt>(MSI->getValue()));
    return true;
  }
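
  // Merge a memset's pattern byte into the 8-byte words of Out[]. The byte
  // is replicated across a word by multiplying 0x0101010101010101 by it;
  // bytes outside [Start, End) are masked off by the LowBits/HighBits shift
  // pairs. For example, a memset of 0xAB over [1, 8) produces the word
  // 0xABABABABABABAB00 at offset 0 (the little-endian byte at offset 0 is
  // masked out).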
  void applyMemSet(IRBuilder<> &IRB, int64_t Start, int64_t End,
                   ConstantInt *V) {
    // Out[] does not distinguish between zero and undef, and we already know
    // that this memset does not overlap with any other initializer. Nothing to
    // do if memset to zero.
    if (V->isZero())
      return;
    for (int64_t Offset = Start - Start % 8; Offset < End; Offset += 8) {
      uint64_t Cst = 0x0101010101010101UL;
      int LowBits = Offset < Start ? (Start - Offset) * 8 : 0;
      if (LowBits)
        Cst = (Cst >> LowBits) << LowBits;
      int HighBits = End - Offset < 8 ? (8 - (End - Offset)) * 8 : 0;
      if (HighBits)
        Cst = (Cst << HighBits) >> HighBits;
      ConstantInt *C =
          ConstantInt::get(IRB.getInt64Ty(), Cst * V->getZExtValue());

      Value *&CurrentV = Out[Offset];
      if (!CurrentV) {
        CurrentV = C;
      } else {
        CurrentV = IRB.CreateOr(CurrentV, C);
      }
    }
  }

  // Take a 64-bit slice of the value starting at the given offset (in bytes).
  // Offset can be negative. Pad with zeroes on both sides when necessary.
  Value *sliceValue(IRBuilder<> &IRB, Value *V, int64_t Offset) {
    if (Offset > 0) {
      V = IRB.CreateLShr(V, Offset * 8);
      V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
    } else if (Offset < 0) {
      V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
      V = IRB.CreateShl(V, -Offset * 8);
    } else {
      V = IRB.CreateZExtOrTrunc(V, IRB.getInt64Ty());
    }
    return V;
  }
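
  // Merge the bytes of one store into Out[], one 8-byte word at a time,
  // using sliceValue to shift the flattened value into position.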
  void applyStore(IRBuilder<> &IRB, int64_t Start, int64_t End,
                  Value *StoredValue) {
    StoredValue = flatten(IRB, StoredValue);
    for (int64_t Offset = Start - Start % 8; Offset < End; Offset += 8) {
      Value *V = sliceValue(IRB, StoredValue, Offset - Start);
      Value *&CurrentV = Out[Offset];
      if (!CurrentV) {
        CurrentV = V;
      } else {
        CurrentV = IRB.CreateOr(CurrentV, V);
      }
    }
  }
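
  // Emit the combined initializer. As an illustration, a 32-byte alloca with
  // a single 8-byte store of V at offset 16 becomes (pseudo-IR):
  //   aarch64_settag_zero(Base, 16)  ; bytes [0, 16) tagged and zeroed
  //   aarch64_stgp(Base + 16, V, 0)  ; bytes [16, 32) tagged, V then zeroes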
  void generate(IRBuilder<> &IRB) {
    LLVM_DEBUG(dbgs() << "Combined initializer\n");
    // No initializers => the entire allocation is undef.
    if (Ranges.empty()) {
      emitUndef(IRB, 0, Size);
      return;
    }

    // Look through 8-byte initializer list 16 bytes at a time;
    // If one of the two 8-byte halves is non-zero non-undef, emit STGP.
    // Otherwise, emit zeroes up to next available item.
    uint64_t LastOffset = 0;
    for (uint64_t Offset = 0; Offset < Size; Offset += 16) {
      auto I1 = Out.find(Offset);
      auto I2 = Out.find(Offset + 8);
      if (I1 == Out.end() && I2 == Out.end())
        continue;

      if (Offset > LastOffset)
        emitZeroes(IRB, LastOffset, Offset - LastOffset);

      Value *Store1 = I1 == Out.end() ? Constant::getNullValue(IRB.getInt64Ty())
                                      : I1->second;
      Value *Store2 = I2 == Out.end() ? Constant::getNullValue(IRB.getInt64Ty())
                                      : I2->second;
      emitPair(IRB, Offset, Store1, Store2);
      LastOffset = Offset + 16;
    }

    // memset(0) does not update Out[], therefore the tail can be either undef
    // or zeroes.
    if (LastOffset < Size)
      emitZeroes(IRB, LastOffset, Size - LastOffset);

    for (const auto &R : Ranges) {
      R.Inst->eraseFromParent();
    }
  }

  void emitZeroes(IRBuilder<> &IRB, uint64_t Offset, uint64_t Size) {
    LLVM_DEBUG(dbgs() << "  [" << Offset << ", " << Offset + Size
                      << ") zero\n");
    Value *Ptr = BasePtr;
    if (Offset)
      Ptr = IRB.CreateConstGEP1_32(Ptr, Offset);
    IRB.CreateCall(SetTagZeroFn,
                   {Ptr, ConstantInt::get(IRB.getInt64Ty(), Size)});
  }

  void emitUndef(IRBuilder<> &IRB, uint64_t Offset, uint64_t Size) {
    LLVM_DEBUG(dbgs() << "  [" << Offset << ", " << Offset + Size
                      << ") undef\n");
    Value *Ptr = BasePtr;
    if (Offset)
      Ptr = IRB.CreateConstGEP1_32(Ptr, Offset);
    IRB.CreateCall(SetTagFn, {Ptr, ConstantInt::get(IRB.getInt64Ty(), Size)});
  }

  void emitPair(IRBuilder<> &IRB, uint64_t Offset, Value *A, Value *B) {
    LLVM_DEBUG(dbgs() << "  [" << Offset << ", " << Offset + 16 << "):\n");
    LLVM_DEBUG(dbgs() << "    " << *A << "\n    " << *B << "\n");
    Value *Ptr = BasePtr;
    if (Offset)
      Ptr = IRB.CreateConstGEP1_32(Ptr, Offset);
    IRB.CreateCall(StgpFn, {Ptr, A, B});
  }
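
  // Canonicalize a stored value to an integer of its store size so that
  // applyStore can slice it with shifts; pointers (and vectors of pointers)
  // are first converted to equally-sized integers.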
  Value *flatten(IRBuilder<> &IRB, Value *V) {
    if (V->getType()->isIntegerTy())
      return V;
    // vector of pointers -> vector of ints
    if (VectorType *VecTy = dyn_cast<VectorType>(V->getType())) {
      LLVMContext &Ctx = IRB.getContext();
      Type *EltTy = VecTy->getElementType();
      if (EltTy->isPointerTy()) {
        uint32_t EltSize = DL->getTypeSizeInBits(EltTy);
        Type *NewTy = VectorType::get(IntegerType::get(Ctx, EltSize),
                                      VecTy->getNumElements());
        V = IRB.CreatePointerCast(V, NewTy);
      }
    }
    return IRB.CreateBitOrPointerCast(
        V, IRB.getIntNTy(DL->getTypeStoreSize(V->getType()) * 8));
  }
};
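
// The stack tagging pass itself: it pads and aligns every interesting alloca
// to a 16-byte granule, assigns it a tag, reroutes all users through the
// aarch64.tagp intrinsic, and surrounds the variable's lifetime with
// tag/untag (settag) calls.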
class AArch64StackTagging : public FunctionPass {
  struct AllocaInfo {
    AllocaInst *AI;
    SmallVector<IntrinsicInst *, 2> LifetimeStart;
    SmallVector<IntrinsicInst *, 2> LifetimeEnd;
    SmallVector<DbgVariableIntrinsic *, 2> DbgVariableIntrinsics;
    int Tag; // -1 for non-tagged allocations
  };

  bool MergeInit;

public:
  static char ID; // Pass ID, replacement for typeid

  AArch64StackTagging(bool MergeInit = true)
      : FunctionPass(ID),
        MergeInit(ClMergeInit.getNumOccurrences() > 0 ? ClMergeInit
                                                      : MergeInit) {
    initializeAArch64StackTaggingPass(*PassRegistry::getPassRegistry());
  }

  bool isInterestingAlloca(const AllocaInst &AI);
  void alignAndPadAlloca(AllocaInfo &Info);

  void tagAlloca(AllocaInst *AI, Instruction *InsertBefore, Value *Ptr,
                 uint64_t Size);
  void untagAlloca(AllocaInst *AI, Instruction *InsertBefore, uint64_t Size);

  Instruction *collectInitializers(Instruction *StartInst, Value *StartPtr,
                                   uint64_t Size, InitializerBuilder &IB);

  Instruction *
  insertBaseTaggedPointer(const MapVector<AllocaInst *, AllocaInfo> &Allocas,
                          const DominatorTree *DT);
  bool runOnFunction(Function &F) override;

  StringRef getPassName() const override { return "AArch64 Stack Tagging"; }

private:
  Function *F;
  Function *SetTagFunc;
  const DataLayout *DL;
  AliasAnalysis *AA;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    if (MergeInit)
      AU.addRequired<AAResultsWrapperPass>();
  }
};

} // end anonymous namespace

char AArch64StackTagging::ID = 0;

INITIALIZE_PASS_BEGIN(AArch64StackTagging, DEBUG_TYPE, "AArch64 Stack Tagging",
                      false, false)
INITIALIZE_PASS_END(AArch64StackTagging, DEBUG_TYPE, "AArch64 Stack Tagging",
                    false, false)

FunctionPass *llvm::createAArch64StackTaggingPass(bool MergeInit) {
  return new AArch64StackTagging(MergeInit);
}
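
// Scan forward from StartInst, for at most ClScanLimit instructions, looking
// for stores and memsets that initialize the alloca at constant offsets, and
// record them in IB. The scan stops early at any instruction that may
// otherwise read or write the memory. Returns the last initializer found,
// which becomes the new insertion point for the merged initializing code.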
Instruction *AArch64StackTagging::collectInitializers(Instruction *StartInst,
                                                      Value *StartPtr,
                                                      uint64_t Size,
                                                      InitializerBuilder &IB) {
  MemoryLocation AllocaLoc{StartPtr, Size};
  Instruction *LastInst = StartInst;
  BasicBlock::iterator BI(StartInst);

  unsigned Count = 0;
  for (; Count < ClScanLimit && !BI->isTerminator(); ++BI) {
    if (!isa<DbgInfoIntrinsic>(*BI))
      ++Count;

    if (isNoModRef(AA->getModRefInfo(&*BI, AllocaLoc)))
      continue;

    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out. We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      if (!NextStore->isSimple())
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset =
          isPointerOffset(StartPtr, NextStore->getPointerOperand(), *DL);
      if (!Offset)
        break;

      if (!IB.addStore(*Offset, NextStore, DL))
        break;
      LastInst = NextStore;
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || !isa<ConstantInt>(MSI->getLength()))
        break;

      if (!isa<ConstantInt>(MSI->getValue()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), *DL);
      if (!Offset)
        break;

      if (!IB.addMemSet(*Offset, MSI))
        break;
      LastInst = MSI;
    }
  }
  return LastInst;
}

bool AArch64StackTagging::isInterestingAlloca(const AllocaInst &AI) {
  // FIXME: support dynamic allocas
  bool IsInteresting =
      AI.getAllocatedType()->isSized() && AI.isStaticAlloca() &&
      // alloca() may be called with 0 size, ignore it.
      AI.getAllocationSizeInBits(*DL).getValue() > 0 &&
      // inalloca allocas are not treated as static, and we don't want
      // dynamic alloca instrumentation for them as well.
      !AI.isUsedWithInAlloca() &&
      // swifterror allocas are register promoted by ISel
      !AI.isSwiftError();
  return IsInteresting;
}
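
// Tag the memory range [Ptr, Ptr + Size), folding in any initializers that
// immediately follow InsertBefore when initializer merging is enabled.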
void AArch64StackTagging::tagAlloca(AllocaInst *AI, Instruction *InsertBefore,
                                    Value *Ptr, uint64_t Size) {
  auto SetTagZeroFunc =
      Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_settag_zero);
  auto StgpFunc =
      Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_stgp);

  InitializerBuilder IB(Size, DL, Ptr, SetTagFunc, SetTagZeroFunc, StgpFunc);
  bool LittleEndian =
      Triple(AI->getModule()->getTargetTriple()).isLittleEndian();
  // Current implementation of initializer merging assumes little endianness.
  if (MergeInit && !F->hasOptNone() && LittleEndian) {
    LLVM_DEBUG(dbgs() << "collecting initializers for " << *AI
                      << ", size = " << Size << "\n");
    InsertBefore = collectInitializers(InsertBefore, Ptr, Size, IB);
  }

  IRBuilder<> IRB(InsertBefore);
  IB.generate(IRB);
}

void AArch64StackTagging::untagAlloca(AllocaInst *AI, Instruction *InsertBefore,
                                      uint64_t Size) {
  IRBuilder<> IRB(InsertBefore);
  IRB.CreateCall(SetTagFunc, {IRB.CreatePointerCast(AI, IRB.getInt8PtrTy()),
                              ConstantInt::get(IRB.getInt64Ty(), Size)});
}
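
// Emit a single IRG (insert random tag) instruction off SP and sink it to
// the nearest common dominator of all tagged allocas; each alloca's own tag
// is later derived from this base via aarch64.tagp.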
Instruction *AArch64StackTagging::insertBaseTaggedPointer(
    const MapVector<AllocaInst *, AllocaInfo> &Allocas,
    const DominatorTree *DT) {
  BasicBlock *PrologueBB = nullptr;
  // Try sinking IRG as deep as possible to avoid hurting shrink wrap.
  for (auto &I : Allocas) {
    const AllocaInfo &Info = I.second;
    AllocaInst *AI = Info.AI;
    if (Info.Tag < 0)
      continue;
    if (!PrologueBB) {
      PrologueBB = AI->getParent();
      continue;
    }
    PrologueBB = DT->findNearestCommonDominator(PrologueBB, AI->getParent());
  }
  assert(PrologueBB);

  IRBuilder<> IRB(&PrologueBB->front());
  Function *IRG_SP =
      Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_irg_sp);
  Instruction *Base =
      IRB.CreateCall(IRG_SP, {Constant::getNullValue(IRB.getInt64Ty())});
  Base->setName("basetag");
  return Base;
}

void AArch64StackTagging::alignAndPadAlloca(AllocaInfo &Info) {
  unsigned NewAlignment = std::max(Info.AI->getAlignment(), kTagGranuleSize);
  Info.AI->setAlignment(NewAlignment);

  uint64_t Size = Info.AI->getAllocationSizeInBits(*DL).getValue() / 8;
  uint64_t AlignedSize = alignTo(Size, kTagGranuleSize);
  if (Size == AlignedSize)
    return;

  // Add padding to the alloca.
  Type *AllocatedType =
      Info.AI->isArrayAllocation()
          ? ArrayType::get(
                Info.AI->getAllocatedType(),
                dyn_cast<ConstantInt>(Info.AI->getArraySize())->getZExtValue())
          : Info.AI->getAllocatedType();
  Type *PaddingType =
      ArrayType::get(Type::getInt8Ty(F->getContext()), AlignedSize - Size);
  Type *TypeWithPadding = StructType::get(AllocatedType, PaddingType);
  auto *NewAI = new AllocaInst(TypeWithPadding,
                               Info.AI->getType()->getAddressSpace(), nullptr,
                               "", Info.AI);
  NewAI->takeName(Info.AI);
  NewAI->setAlignment(Info.AI->getAlignment());
  NewAI->setUsedWithInAlloca(Info.AI->isUsedWithInAlloca());
  NewAI->setSwiftError(Info.AI->isSwiftError());
  NewAI->copyMetadata(*Info.AI);

  auto *NewPtr = new BitCastInst(NewAI, Info.AI->getType(), "", Info.AI);
  Info.AI->replaceAllUsesWith(NewPtr);
  Info.AI->eraseFromParent();
  Info.AI = NewAI;
}
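
// Pass driver: collect allocas, lifetime markers, debug intrinsics and
// function exits, pad and tag the interesting allocas, then rewrite their
// users through aarch64.tagp and insert the matching tag/untag calls.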
// FIXME: check for MTE extension
bool AArch64StackTagging::runOnFunction(Function &Fn) {
  if (!Fn.hasFnAttribute(Attribute::SanitizeMemTag))
    return false;

  F = &Fn;
  DL = &Fn.getParent()->getDataLayout();
  if (MergeInit)
    AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();

  MapVector<AllocaInst *, AllocaInfo> Allocas; // need stable iteration order
  SmallVector<Instruction *, 8> RetVec;
  DenseMap<Value *, AllocaInst *> AllocaForValue;
  SmallVector<Instruction *, 4> UnrecognizedLifetimes;
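
  // First pass over the function: classify every instruction we care about -
  // allocas, debug intrinsics that refer to allocas, lifetime markers, and
  // exits (returns, resumes, cleanuprets).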
  for (auto &BB : *F) {
    for (BasicBlock::iterator IT = BB.begin(); IT != BB.end(); ++IT) {
      Instruction *I = &*IT;
      if (auto *AI = dyn_cast<AllocaInst>(I)) {
        Allocas[AI].AI = AI;
        continue;
      }

      if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(I)) {
        if (auto *AI =
                dyn_cast_or_null<AllocaInst>(DVI->getVariableLocation())) {
          Allocas[AI].DbgVariableIntrinsics.push_back(DVI);
        }
        continue;
      }

      auto *II = dyn_cast<IntrinsicInst>(I);
      if (II && (II->getIntrinsicID() == Intrinsic::lifetime_start ||
                 II->getIntrinsicID() == Intrinsic::lifetime_end)) {
        AllocaInst *AI =
            llvm::findAllocaForValue(II->getArgOperand(1), AllocaForValue);
        if (!AI) {
          UnrecognizedLifetimes.push_back(I);
          continue;
        }
        if (II->getIntrinsicID() == Intrinsic::lifetime_start)
          Allocas[AI].LifetimeStart.push_back(II);
        else
          Allocas[AI].LifetimeEnd.push_back(II);
        continue;
      }

      if (isa<ReturnInst>(I) || isa<ResumeInst>(I) || isa<CleanupReturnInst>(I))
        RetVec.push_back(I);
    }
  }

  if (Allocas.empty())
    return false;
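
  // Second pass: pad each interesting alloca to a full 16-byte granule and
  // assign it a logical tag; tags cycle through 0..15 and are applied
  // relative to the random base tag produced by insertBaseTaggedPointer.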
  int NextTag = 0;
  int NumInterestingAllocas = 0;
  for (auto &I : Allocas) {
    AllocaInfo &Info = I.second;
    assert(Info.AI);

    if (!isInterestingAlloca(*Info.AI)) {
      Info.Tag = -1;
      continue;
    }

    alignAndPadAlloca(Info);
    NumInterestingAllocas++;
    Info.Tag = NextTag;
    NextTag = (NextTag + 1) % 16;
  }

  if (NumInterestingAllocas == 0)
    return true;

  SetTagFunc =
      Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_settag);

  // Compute the DominatorTree only if there is more than one interesting
  // alloca and the tree is not already available for free.
  Instruction *Base;
  if (NumInterestingAllocas > 1) {
    auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
    if (DTWP) {
      Base = insertBaseTaggedPointer(Allocas, &DTWP->getDomTree());
    } else {
      DominatorTree DT(*F);
      Base = insertBaseTaggedPointer(Allocas, &DT);
    }
  } else {
    Base = insertBaseTaggedPointer(Allocas, nullptr);
  }

  for (auto &I : Allocas) {
    const AllocaInfo &Info = I.second;
    AllocaInst *AI = Info.AI;
    if (Info.Tag < 0)
      continue;

    // Replace alloca with tagp(alloca).
    IRBuilder<> IRB(Info.AI->getNextNode());
    Function *TagP = Intrinsic::getDeclaration(
        F->getParent(), Intrinsic::aarch64_tagp, {Info.AI->getType()});
    Instruction *TagPCall =
        IRB.CreateCall(TagP, {Constant::getNullValue(Info.AI->getType()), Base,
                              ConstantInt::get(IRB.getInt64Ty(), Info.Tag)});
    if (Info.AI->hasName())
      TagPCall->setName(Info.AI->getName() + ".tag");
    Info.AI->replaceAllUsesWith(TagPCall);
    TagPCall->setOperand(0, Info.AI);
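
    // If there is exactly one recognized lifetime.start/end pair, tag just
    // the variable's lifetime; otherwise tag right after the alloca, untag on
    // every function exit, and drop the now-redundant lifetime markers.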
    if (UnrecognizedLifetimes.empty() && Info.LifetimeStart.size() == 1 &&
        Info.LifetimeEnd.size() == 1) {
      IntrinsicInst *Start = Info.LifetimeStart[0];
      uint64_t Size =
          dyn_cast<ConstantInt>(Start->getArgOperand(0))->getZExtValue();
      Size = alignTo(Size, kTagGranuleSize);
      tagAlloca(AI, Start->getNextNode(), Start->getArgOperand(1), Size);
      untagAlloca(AI, Info.LifetimeEnd[0], Size);
    } else {
      uint64_t Size = Info.AI->getAllocationSizeInBits(*DL).getValue() / 8;
      Value *Ptr = IRB.CreatePointerCast(TagPCall, IRB.getInt8PtrTy());
      tagAlloca(AI, &*IRB.GetInsertPoint(), Ptr, Size);
      for (auto &RI : RetVec) {
        untagAlloca(AI, RI, Size);
      }
      // We may have inserted tag/untag outside of any lifetime interval.
      // Remove all lifetime intrinsics for this alloca.
      for (auto &II : Info.LifetimeStart)
        II->eraseFromParent();
      for (auto &II : Info.LifetimeEnd)
        II->eraseFromParent();
    }

    // Fixup debug intrinsics to point to the new alloca.
    for (auto DVI : Info.DbgVariableIntrinsics)
      DVI->setArgOperand(
          0,
          MetadataAsValue::get(F->getContext(), LocalAsMetadata::get(Info.AI)));
  }

  // If we have instrumented at least one alloca, all unrecognized lifetime
  // intrinsics have to go.
  for (auto &I : UnrecognizedLifetimes)
    I->eraseFromParent();

  return true;
}