//===- HWAddressSanitizer.cpp - detector of addressability bugs ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file is a part of HWAddressSanitizer, an address sanity checker
/// based on tagged addressing.
//===----------------------------------------------------------------------===//

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <limits>
#include <sstream>

using namespace llvm;

#define DEBUG_TYPE "hwasan"

static const char *const kHwasanModuleCtorName = "hwasan.module_ctor";
static const char *const kHwasanInitName = "__hwasan_init";

static const char *const kHwasanShadowMemoryDynamicAddress =
    "__hwasan_shadow_memory_dynamic_address";

// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

static const size_t kDefaultShadowScale = 4;
static const uint64_t kDynamicShadowSentinel =
    std::numeric_limits<uint64_t>::max();
static const unsigned kPointerTagShift = 56;

static const unsigned kShadowBaseAlignment = 32;

static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
    "hwasan-memory-access-callback-prefix",
    cl::desc("Prefix for memory access callbacks"), cl::Hidden,
    cl::init("__hwasan_"));

static cl::opt<bool>
    ClInstrumentWithCalls("hwasan-instrument-with-calls",
                          cl::desc("instrument reads and writes with callbacks"),
                          cl::Hidden, cl::init(false));

static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentWrites(
    "hwasan-instrument-writes", cl::desc("instrument write instructions"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentAtomics(
    "hwasan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

static cl::opt<bool> ClRecover(
    "hwasan-recover",
    cl::desc("Enable recovery mode (continue-after-error)."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
                                       cl::desc("instrument stack (allocas)"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClUARRetagToZero(
    "hwasan-uar-retag-to-zero",
    cl::desc("Clear alloca tags before returning from the function to allow "
             "mixing instrumented and non-instrumented function calls. When "
             "set to false, allocas are retagged before returning from the "
             "function to detect use after return."),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClGenerateTagsWithCalls(
    "hwasan-generate-tags-with-calls",
    cl::desc("generate new tags with runtime library calls"), cl::Hidden,
    cl::init(false));

static cl::opt<int> ClMatchAllTag(
    "hwasan-match-all-tag",
    cl::desc("don't report bad accesses via pointers with this tag"),
    cl::Hidden, cl::init(-1));

static cl::opt<bool> ClEnableKhwasan(
    "hwasan-kernel",
    cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
    cl::Hidden, cl::init(false));

// These flags allow changing the shadow mapping and control how shadow memory
// is accessed. The shadow mapping looks like:
//   Shadow = (Mem >> scale) + offset
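// Illustrative example (not part of the original file): with the default
// scale of 4 and a zero offset, the 16-byte granule of memory at address
// 0x4000 is described by the shadow byte at 0x4000 >> 4 == 0x400.
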
static cl::opt<unsigned long long> ClMappingOffset(
    "hwasan-mapping-offset",
    cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"), cl::Hidden,
    cl::init(0));

static cl::opt<bool>
    ClWithIfunc("hwasan-with-ifunc",
                cl::desc("Access dynamic shadow through an ifunc global on "
                         "platforms that support this"),
                cl::Hidden, cl::init(false));

static cl::opt<bool> ClWithTls(
    "hwasan-with-tls",
    cl::desc("Access dynamic shadow through a thread-local pointer on "
             "platforms that support this"),
    cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClRecordStackHistory("hwasan-record-stack-history",
                         cl::desc("Record stack frames with tagged allocations "
                                  "in a thread-local ring buffer"),
                         cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClCreateFrameDescriptions("hwasan-create-frame-descriptions",
                              cl::desc("create static frame descriptions"),
                              cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
                              cl::desc("instrument memory intrinsics"),
                              cl::Hidden, cl::init(true));

static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
                                       cl::desc("inline all checks"),
                                       cl::Hidden, cl::init(false));

static cl::opt<bool> ClAllowIfunc("hwasan-allow-ifunc",
                                  cl::desc("allow the use of ifunc"),
                                  cl::Hidden, cl::init(false));

namespace {

/// An instrumentation pass implementing detection of addressability bugs
/// using tagged pointers.
class HWAddressSanitizer : public FunctionPass {
public:
  // Pass identification, replacement for typeid.
  static char ID;

  explicit HWAddressSanitizer(bool CompileKernel = false, bool Recover = false)
      : FunctionPass(ID) {
    this->Recover = ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover;
    this->CompileKernel = ClEnableKhwasan.getNumOccurrences() > 0 ?
        ClEnableKhwasan : CompileKernel;
  }

  StringRef getPassName() const override { return "HWAddressSanitizer"; }

  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;

  void initializeCallbacks(Module &M);

  Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
  Value *getDynamicShadowNonTls(IRBuilder<> &IRB);

  void untagPointerOperand(Instruction *I, Value *Addr);
  Value *shadowBase();
  Value *memToShadow(Value *Mem, IRBuilder<> &IRB);
  void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
                                 unsigned AccessSizeIndex,
                                 Instruction *InsertBefore);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  bool instrumentMemAccess(Instruction *I);
  Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
                                   uint64_t *TypeSize, unsigned *Alignment,
                                   Value **MaybeMask);

  bool isInterestingAlloca(const AllocaInst &AI);
  bool tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag);
  Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
  Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
  bool instrumentStack(SmallVectorImpl<AllocaInst *> &Allocas,
                       SmallVectorImpl<Instruction *> &RetVec, Value *StackTag);
  Value *getNextTagWithCall(IRBuilder<> &IRB);
  Value *getStackBaseTag(IRBuilder<> &IRB);
  Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, AllocaInst *AI,
                      unsigned AllocaNo);
  Value *getUARTag(IRBuilder<> &IRB, Value *StackTag);

  Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty);
  Value *emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);

private:
  LLVMContext *C;
  std::string CurModuleUniqueId;
  Triple TargetTriple;
  FunctionCallee HWAsanMemmove, HWAsanMemcpy, HWAsanMemset;

  // Frame description is a way to pass names/sizes of local variables
  // to the run-time w/o adding extra executable code in every function.
  // We do this by creating a separate section with {PC,Descr} pairs and passing
  // the section beg/end to __hwasan_init_frames() at module init time.
  std::string createFrameString(ArrayRef<AllocaInst*> Allocas);
  void createFrameGlobal(Function &F, const std::string &FrameString);
  // Get the section name for frame descriptions. Currently ELF-only.
  const char *getFrameSection() { return "__hwasan_frames"; }
  const char *getFrameSectionBeg() { return "__start___hwasan_frames"; }
  const char *getFrameSectionEnd() { return "__stop___hwasan_frames"; }
  GlobalVariable *createFrameSectionBound(Module &M, Type *Ty,
                                          const char *Name) {
    auto GV = new GlobalVariable(M, Ty, false, GlobalVariable::ExternalLinkage,
                                 nullptr, Name);
    GV->setVisibility(GlobalValue::HiddenVisibility);
    return GV;
  }

  /// This struct defines the shadow mapping using the rule:
  ///   shadow = (mem >> Scale) + Offset.
  /// If InGlobal is true, then
  ///   extern char __hwasan_shadow[];
  ///   shadow = (mem >> Scale) + &__hwasan_shadow
  /// If InTls is true, then
  ///   extern char *__hwasan_tls;
  ///   shadow = (mem >> Scale) + align_up(__hwasan_tls, kShadowBaseAlignment)
  struct ShadowMapping {
    int Scale;
    uint64_t Offset;
    bool InGlobal;
    bool InTls;

    void init(Triple &TargetTriple);
    unsigned getAllocaAlignment() const { return 1U << Scale; }
  };

  ShadowMapping Mapping;

  Type *IntptrTy;
  Type *Int8PtrTy;
  Type *Int8Ty;
  Type *Int32Ty;

  bool CompileKernel;
  bool Recover;

  Function *HwasanCtorFunction;

  FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
  FunctionCallee HwasanMemoryAccessCallbackSized[2];

  FunctionCallee HwasanTagMemoryFunc;
  FunctionCallee HwasanGenerateTagFunc;
  FunctionCallee HwasanThreadEnterFunc;

  Constant *ShadowGlobal;

  Value *LocalDynamicShadow = nullptr;
  GlobalValue *ThreadPtrGlobal = nullptr;
};

} // end anonymous namespace

char HWAddressSanitizer::ID = 0;

INITIALIZE_PASS_BEGIN(
    HWAddressSanitizer, "hwasan",
    "HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
    false)
INITIALIZE_PASS_END(
    HWAddressSanitizer, "hwasan",
    "HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
    false)

FunctionPass *llvm::createHWAddressSanitizerPass(bool CompileKernel,
                                                 bool Recover) {
  assert(!CompileKernel || Recover);
  return new HWAddressSanitizer(CompileKernel, Recover);
}

/// Module-level initialization.
///
/// Inserts a call to __hwasan_init to the module's constructor list.
bool HWAddressSanitizer::doInitialization(Module &M) {
  LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
  auto &DL = M.getDataLayout();

  TargetTriple = Triple(M.getTargetTriple());

  Mapping.init(TargetTriple);

  C = &(M.getContext());
  CurModuleUniqueId = getUniqueModuleId(&M);
  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(DL);
  Int8PtrTy = IRB.getInt8PtrTy();
  Int8Ty = IRB.getInt8Ty();
  Int32Ty = IRB.getInt32Ty();

  HwasanCtorFunction = nullptr;
  if (!CompileKernel) {
    std::tie(HwasanCtorFunction, std::ignore) =
        createSanitizerCtorAndInitFunctions(M, kHwasanModuleCtorName,
                                            kHwasanInitName,
                                            /*InitArgTypes=*/{},
                                            /*InitArgs=*/{});
    Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
    HwasanCtorFunction->setComdat(CtorComdat);
    appendToGlobalCtors(M, HwasanCtorFunction, 0, HwasanCtorFunction);

    // Create a zero-length global in __hwasan_frames so that the linker will
    // always create start and stop symbols.
    //
    // N.B. If we ever start creating associated metadata in this pass this
    // global will need to be associated with the ctor.
    Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
    auto GV =
        new GlobalVariable(M, Int8Arr0Ty, /*isConstantGlobal*/ true,
                           GlobalVariable::PrivateLinkage,
                           Constant::getNullValue(Int8Arr0Ty), "__hwasan");
    GV->setSection(getFrameSection());
    GV->setComdat(CtorComdat);
    appendToCompilerUsed(M, GV);

    IRBuilder<> IRBCtor(HwasanCtorFunction->getEntryBlock().getTerminator());
    IRBCtor.CreateCall(
        declareSanitizerInitFunction(M, "__hwasan_init_frames",
                                     {Int8PtrTy, Int8PtrTy}),
        {createFrameSectionBound(M, Int8Ty, getFrameSectionBeg()),
         createFrameSectionBound(M, Int8Ty, getFrameSectionEnd())});
  }

  if (!TargetTriple.isAndroid())
    appendToCompilerUsed(
        M, ThreadPtrGlobal = new GlobalVariable(
               M, IntptrTy, false, GlobalVariable::ExternalLinkage, nullptr,
               "__hwasan_tls", nullptr, GlobalVariable::InitialExecTLSModel));

  return true;
}

void HWAddressSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
    const std::string TypeStr = AccessIsWrite ? "store" : "load";
    const std::string EndingStr = Recover ? "_noabort" : "";

    HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
        ClMemoryAccessCallbackPrefix + TypeStr + "N" + EndingStr,
        FunctionType::get(IRB.getVoidTy(), {IntptrTy, IntptrTy}, false));

    for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
         AccessSizeIndex++) {
      HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
          M.getOrInsertFunction(
              ClMemoryAccessCallbackPrefix + TypeStr +
                  itostr(1ULL << AccessSizeIndex) + EndingStr,
              FunctionType::get(IRB.getVoidTy(), {IntptrTy}, false));
    }
  }

  HwasanTagMemoryFunc = M.getOrInsertFunction(
      "__hwasan_tag_memory", IRB.getVoidTy(), Int8PtrTy, Int8Ty, IntptrTy);
  HwasanGenerateTagFunc =
      M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);

  ShadowGlobal = M.getOrInsertGlobal("__hwasan_shadow",
                                     ArrayType::get(IRB.getInt8Ty(), 0));

  const std::string MemIntrinCallbackPrefix =
      CompileKernel ? std::string("") : ClMemoryAccessCallbackPrefix;
  HWAsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
                                        IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                        IRB.getInt8PtrTy(), IntptrTy);
  HWAsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy",
                                       IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                       IRB.getInt8PtrTy(), IntptrTy);
  HWAsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
                                       IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                       IRB.getInt32Ty(), IntptrTy);

  HwasanThreadEnterFunc =
      M.getOrInsertFunction("__hwasan_thread_enter", IRB.getVoidTy());
}

Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
  // An empty inline asm with input reg == output reg.
  // An opaque no-op cast, basically.
  InlineAsm *Asm = InlineAsm::get(
      FunctionType::get(Int8PtrTy, {ShadowGlobal->getType()}, false),
      StringRef(""), StringRef("=r,0"),
      /*hasSideEffects=*/false);
  return IRB.CreateCall(Asm, {ShadowGlobal}, ".hwasan.shadow");
}

Value *HWAddressSanitizer::getDynamicShadowNonTls(IRBuilder<> &IRB) {
  // Generate code only when dynamic addressing is needed.
  if (Mapping.Offset != kDynamicShadowSentinel)
    return nullptr;

  if (Mapping.InGlobal) {
    return getDynamicShadowIfunc(IRB);
  } else {
    Value *GlobalDynamicAddress =
        IRB.GetInsertBlock()->getParent()->getParent()->getOrInsertGlobal(
            kHwasanShadowMemoryDynamicAddress, Int8PtrTy);
    return IRB.CreateLoad(Int8PtrTy, GlobalDynamicAddress);
  }
}

Value *HWAddressSanitizer::isInterestingMemoryAccess(Instruction *I,
                                                     bool *IsWrite,
                                                     uint64_t *TypeSize,
                                                     unsigned *Alignment,
                                                     Value **MaybeMask) {
  // Skip memory accesses inserted by another instrumentation.
  if (I->getMetadata("nosanitize")) return nullptr;

  // Do not instrument the load fetching the dynamic shadow address.
  if (LocalDynamicShadow == I)
    return nullptr;

  Value *PtrOperand = nullptr;
  const DataLayout &DL = I->getModule()->getDataLayout();
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads) return nullptr;
    *IsWrite = false;
    *TypeSize = DL.getTypeStoreSizeInBits(LI->getType());
    *Alignment = LI->getAlignment();
    PtrOperand = LI->getPointerOperand();
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType());
    *Alignment = SI->getAlignment();
    PtrOperand = SI->getPointerOperand();
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());
    *Alignment = 0;
    PtrOperand = RMW->getPointerOperand();
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
    *Alignment = 0;
    PtrOperand = XCHG->getPointerOperand();
  }

  if (PtrOperand) {
    // Do not instrument accesses from different address spaces; we cannot deal
    // with them.
    Type *PtrTy = cast<PointerType>(PtrOperand->getType()->getScalarType());
    if (PtrTy->getPointerAddressSpace() != 0)
      return nullptr;

    // Ignore swifterror addresses.
    // swifterror memory addresses are mem2reg promoted by instruction
    // selection. As such they cannot have regular uses like an instrumentation
    // function and it makes no sense to track them as memory.
    if (PtrOperand->isSwiftError())
      return nullptr;
  }

  return PtrOperand;
}

static unsigned getPointerOperandIndex(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperandIndex();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperandIndex();
  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
    return RMW->getPointerOperandIndex();
  if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
    return XCHG->getPointerOperandIndex();
  report_fatal_error("Unexpected instruction");
  return -1;
}

static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = countTrailingZeros(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
}

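// Illustrative example (not part of the original file): a 64-bit access is
// 8 bytes, and countTrailingZeros(8) == 3, which selects the
// __hwasan_load8/__hwasan_store8 callbacks created in initializeCallbacks().
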
void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
  if (TargetTriple.isAArch64())
    return;

  IRBuilder<> IRB(I);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  Value *UntaggedPtr =
      IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
  I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
}

Value *HWAddressSanitizer::shadowBase() {
  if (LocalDynamicShadow)
    return LocalDynamicShadow;
  return ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, Mapping.Offset),
                                   Int8PtrTy);
}

Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
  // Mem >> Scale
  Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
  if (Mapping.Offset == 0)
    return IRB.CreateIntToPtr(Shadow, Int8PtrTy);
  // (Mem >> Scale) + Offset
  return IRB.CreateGEP(Int8Ty, shadowBase(), Shadow);
}

void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
                                                   unsigned AccessSizeIndex,
                                                   Instruction *InsertBefore) {
  const int64_t AccessInfo = Recover * 0x20 + IsWrite * 0x10 + AccessSizeIndex;
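  // Illustrative example (not part of the original file): a recoverable
  // 8-byte write has AccessSizeIndex 3, so AccessInfo == 0x20 + 0x10 + 3 ==
  // 0x33; the runtime decodes this constant back out of the check-failure
  // instruction emitted below.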
  IRBuilder<> IRB(InsertBefore);

  if (!ClInlineAllChecks && TargetTriple.isAArch64() &&
      TargetTriple.isOSBinFormatELF() && !Recover) {
    Module *M = IRB.GetInsertBlock()->getParent()->getParent();
    Ptr = IRB.CreateBitCast(Ptr, Int8PtrTy);
    IRB.CreateCall(
        Intrinsic::getDeclaration(M, Intrinsic::hwasan_check_memaccess),
        {shadowBase(), Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
    return;
  }

  Value *PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
  Value *PtrTag = IRB.CreateTrunc(IRB.CreateLShr(PtrLong, kPointerTagShift),
                                  IRB.getInt8Ty());
  Value *AddrLong = untagPointer(IRB, PtrLong);
  Value *Shadow = memToShadow(AddrLong, IRB);
  Value *MemTag = IRB.CreateLoad(Int8Ty, Shadow);
  Value *TagMismatch = IRB.CreateICmpNE(PtrTag, MemTag);

  int matchAllTag = ClMatchAllTag.getNumOccurrences() > 0 ?
      ClMatchAllTag : (CompileKernel ? 0xFF : -1);
  if (matchAllTag != -1) {
    Value *TagNotIgnored = IRB.CreateICmpNE(PtrTag,
        ConstantInt::get(PtrTag->getType(), matchAllTag));
    TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
  }

  Instruction *CheckTerm =
      SplitBlockAndInsertIfThen(TagMismatch, InsertBefore, !Recover,
                                MDBuilder(*C).createBranchWeights(1, 100000));

  IRB.SetInsertPoint(CheckTerm);
  InlineAsm *Asm;
  switch (TargetTriple.getArch()) {
    case Triple::x86_64:
      // The signal handler will find the data address in rdi.
      Asm = InlineAsm::get(
          FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
          "int3\nnopl " + itostr(0x40 + AccessInfo) + "(%rax)",
          "{rdi}",
          /*hasSideEffects=*/true);
      break;
    case Triple::aarch64:
    case Triple::aarch64_be:
      // The signal handler will find the data address in x0.
      Asm = InlineAsm::get(
          FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
          "brk #" + itostr(0x900 + AccessInfo),
          "{x0}",
          /*hasSideEffects=*/true);
      break;
    default:
      report_fatal_error("unsupported architecture");
  }
  IRB.CreateCall(Asm, PtrLong);
}

void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
  IRBuilder<> IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    IRB.CreateCall(
        isa<MemMoveInst>(MI) ? HWAsanMemmove : HWAsanMemcpy,
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  } else if (isa<MemSetInst>(MI)) {
    IRB.CreateCall(
        HWAsanMemset,
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  }
  MI->eraseFromParent();
}

bool HWAddressSanitizer::instrumentMemAccess(Instruction *I) {
  LLVM_DEBUG(dbgs() << "Instrumenting: " << *I << "\n");
  bool IsWrite = false;
  unsigned Alignment = 0;
  uint64_t TypeSize = 0;
  Value *MaybeMask = nullptr;

  if (ClInstrumentMemIntrinsics && isa<MemIntrinsic>(I)) {
    instrumentMemIntrinsic(cast<MemIntrinsic>(I));
    return true;
  }

  Value *Addr =
      isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment, &MaybeMask);

  if (!Addr)
    return false;

  if (MaybeMask)
    return false; // FIXME

  IRBuilder<> IRB(I);
  if (isPowerOf2_64(TypeSize) &&
      (TypeSize / 8 <= (1UL << (kNumberOfAccessSizes - 1))) &&
      (Alignment >= (1UL << Mapping.Scale) || Alignment == 0 ||
       Alignment >= TypeSize / 8)) {
    size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
    if (ClInstrumentWithCalls) {
      IRB.CreateCall(HwasanMemoryAccessCallback[IsWrite][AccessSizeIndex],
                     IRB.CreatePointerCast(Addr, IntptrTy));
    } else {
      instrumentMemAccessInline(Addr, IsWrite, AccessSizeIndex, I);
    }
  } else {
    IRB.CreateCall(HwasanMemoryAccessCallbackSized[IsWrite],
                   {IRB.CreatePointerCast(Addr, IntptrTy),
                    ConstantInt::get(IntptrTy, TypeSize / 8)});
  }
  untagPointerOperand(I, Addr);

  return true;
}

static uint64_t getAllocaSizeInBytes(const AllocaInst &AI) {
  uint64_t ArraySize = 1;
  if (AI.isArrayAllocation()) {
    const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
    assert(CI && "non-constant array size");
    ArraySize = CI->getZExtValue();
  }
  Type *Ty = AI.getAllocatedType();
  uint64_t SizeInBytes = AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
  return SizeInBytes * ArraySize;
}

bool HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI,
                                   Value *Tag) {
  size_t Size = (getAllocaSizeInBytes(*AI) + Mapping.getAllocaAlignment() - 1) &
                ~(Mapping.getAllocaAlignment() - 1);
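  // Illustrative example (not part of the original file): with the default
  // Scale of 4, getAllocaAlignment() is 16, so a 20-byte alloca rounds up to
  // Size == (20 + 15) & ~15 == 32, i.e. two shadow granules.
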
  Value *JustTag = IRB.CreateTrunc(Tag, IRB.getInt8Ty());
  if (ClInstrumentWithCalls) {
    IRB.CreateCall(HwasanTagMemoryFunc,
                   {IRB.CreatePointerCast(AI, Int8PtrTy), JustTag,
                    ConstantInt::get(IntptrTy, Size)});
  } else {
    size_t ShadowSize = Size >> Mapping.Scale;
    Value *ShadowPtr = memToShadow(IRB.CreatePointerCast(AI, IntptrTy), IRB);
    // If this memset is not inlined, it will be intercepted in the hwasan
    // runtime library. That's OK, because the interceptor skips the checks if
    // the address is in the shadow region.
    // FIXME: the interceptor is not as fast as real memset. Consider lowering
    // llvm.memset right here into either a sequence of stores, or a call to
    // hwasan_tag_memory.
    IRB.CreateMemSet(ShadowPtr, JustTag, ShadowSize, /*Align=*/1);
  }
  return true;
}

static unsigned RetagMask(unsigned AllocaNo) {
  // A list of 8-bit numbers that have at most one run of non-zero bits.
  // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for these
  // masks.
  // The list does not include the value 255, which is used for UAR.
  static unsigned FastMasks[] = {
      0,   1,   2,   3,   4,   6,   7,   8,   12,  14,  15,  16,  24,
      28,  30,  31,  32,  48,  56,  60,  62,  63,  64,  96,  112, 120,
      124, 126, 127, 128, 192, 224, 240, 248, 252, 254};
  return FastMasks[AllocaNo % (sizeof(FastMasks) / sizeof(FastMasks[0]))];
}

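// Illustrative example (not part of the original file): AllocaNo 5 selects
// FastMasks[5] == 6 (binary 00000110, a single run of set bits), and the
// indexing wraps after the 36 entries above, so distinct retag masks are only
// guaranteed among the first 36 allocas of a frame.
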
Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
  return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
}

Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
  if (ClGenerateTagsWithCalls)
    return getNextTagWithCall(IRB);
  // FIXME: use addressofreturnaddress (but implement it in aarch64 backend
  // first).
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  auto GetStackPointerFn =
      Intrinsic::getDeclaration(M, Intrinsic::frameaddress);
  Value *StackPointer = IRB.CreateCall(
      GetStackPointerFn, {Constant::getNullValue(IRB.getInt32Ty())});

  // Extract some entropy from the stack pointer for the tags.
  // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
  // between functions).
  Value *StackPointerLong = IRB.CreatePointerCast(StackPointer, IntptrTy);
  Value *StackTag =
      IRB.CreateXor(StackPointerLong, IRB.CreateLShr(StackPointerLong, 20),
                    "hwasan.stack.base.tag");
  return StackTag;
}

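// Illustrative example with a made-up frame address: for 0x7ff012345670, the
// value shifted right by 20 is 0x7ff0123, so the low byte of the base tag is
// 0x70 ^ 0x23 == 0x53, mixing per-function stack bits with ASLR bits.
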
Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
                                        AllocaInst *AI, unsigned AllocaNo) {
  if (ClGenerateTagsWithCalls)
    return getNextTagWithCall(IRB);
  return IRB.CreateXor(StackTag,
                       ConstantInt::get(IntptrTy, RetagMask(AllocaNo)));
}

Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB, Value *StackTag) {
  if (ClUARRetagToZero)
    return ConstantInt::get(IntptrTy, 0);
  if (ClGenerateTagsWithCalls)
    return getNextTagWithCall(IRB);
  return IRB.CreateXor(StackTag, ConstantInt::get(IntptrTy, 0xFFU));
}

// Add a tag to an address.
Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
                                      Value *PtrLong, Value *Tag) {
  Value *TaggedPtrLong;
  if (CompileKernel) {
    // Kernel addresses have 0xFF in the most significant byte.
    Value *ShiftedTag = IRB.CreateOr(
        IRB.CreateShl(Tag, kPointerTagShift),
        ConstantInt::get(IntptrTy, (1ULL << kPointerTagShift) - 1));
    TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
  } else {
    // Userspace can simply do OR (tag << 56);
    Value *ShiftedTag = IRB.CreateShl(Tag, kPointerTagShift);
    TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
  }
  return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
}

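// Illustrative kernel-mode example (not part of the original file): tagging
// 0xffff000012345678 with tag 0xAB ANDs it with
// (0xAB << 56) | 0x00ffffffffffffff == 0xabffffffffffffff, yielding
// 0xabff000012345678: since kernel pointers start with an all-ones top byte,
// the AND replaces that byte with the tag while preserving the low 56 bits.
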
// Remove tag from an address.
Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
  Value *UntaggedPtrLong;
  if (CompileKernel) {
    // Kernel addresses have 0xFF in the most significant byte.
    UntaggedPtrLong = IRB.CreateOr(PtrLong,
        ConstantInt::get(PtrLong->getType(), 0xFFULL << kPointerTagShift));
  } else {
    // Userspace addresses have 0x00.
    UntaggedPtrLong = IRB.CreateAnd(PtrLong,
        ConstantInt::get(PtrLong->getType(), ~(0xFFULL << kPointerTagShift)));
  }
  return UntaggedPtrLong;
}

Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty) {
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  if (TargetTriple.isAArch64() && TargetTriple.isAndroid()) {
    // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
    // in Bionic's libc/private/bionic_tls.h.
    Function *ThreadPointerFunc =
        Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
    Value *SlotPtr = IRB.CreatePointerCast(
        IRB.CreateConstGEP1_32(IRB.getInt8Ty(),
                               IRB.CreateCall(ThreadPointerFunc), 0x30),
        Ty->getPointerTo(0));
    return SlotPtr;
  }
  if (ThreadPtrGlobal)
    return ThreadPtrGlobal;

  return nullptr;
}

// Creates a string with a description of the stack frame (set of Allocas).
// The string is intended to be human readable.
// The current form is: Size1 Name1; Size2 Name2; ...
std::string
HWAddressSanitizer::createFrameString(ArrayRef<AllocaInst *> Allocas) {
  std::ostringstream Descr;
  for (auto AI : Allocas)
    Descr << getAllocaSizeInBytes(*AI) << " " << AI->getName().str() << "; ";
  return Descr.str();
}

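// Illustrative example with hypothetical variable names: a frame holding
// "int32_t x[4]" and "char buf[64]" would be described as "16 x; 64 buf; ".
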
// Creates a global in the frame section which consists of two pointers:
// the function PC and the frame string constant.
void HWAddressSanitizer::createFrameGlobal(Function &F,
                                           const std::string &FrameString) {
  Module &M = *F.getParent();
  auto DescrGV = createPrivateGlobalForString(M, FrameString, true);
  auto PtrPairTy = StructType::get(F.getType(), DescrGV->getType());
  auto GV = new GlobalVariable(
      M, PtrPairTy, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
      ConstantStruct::get(PtrPairTy, (Constant *)&F, (Constant *)DescrGV),
      "__hwasan");
  GV->setSection(getFrameSection());
  appendToCompilerUsed(M, GV);
  // Put GV into F's Comdat so that if F is deleted GV can be deleted too.
  if (auto Comdat =
          GetOrCreateFunctionComdat(F, TargetTriple, CurModuleUniqueId))
    GV->setComdat(Comdat);
}

Value *HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB,
                                        bool WithFrameRecord) {
  if (!Mapping.InTls)
    return getDynamicShadowNonTls(IRB);

  if (ClAllowIfunc && !WithFrameRecord && TargetTriple.isAndroid())
    return getDynamicShadowIfunc(IRB);

  Value *SlotPtr = getHwasanThreadSlotPtr(IRB, IntptrTy);
  assert(SlotPtr);

  Instruction *ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);

  Function *F = IRB.GetInsertBlock()->getParent();
  if (F->getFnAttribute("hwasan-abi").getValueAsString() == "interceptor") {
    Value *ThreadLongEqZero =
        IRB.CreateICmpEQ(ThreadLong, ConstantInt::get(IntptrTy, 0));
    auto *Br = cast<BranchInst>(SplitBlockAndInsertIfThen(
        ThreadLongEqZero, cast<Instruction>(ThreadLongEqZero)->getNextNode(),
        false, MDBuilder(*C).createBranchWeights(1, 100000)));

    IRB.SetInsertPoint(Br);
    // FIXME: This should call a new runtime function with a custom calling
    // convention to avoid needing to spill all arguments here.
    IRB.CreateCall(HwasanThreadEnterFunc);
    LoadInst *ReloadThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);

    IRB.SetInsertPoint(&*Br->getSuccessor(0)->begin());
    PHINode *ThreadLongPhi = IRB.CreatePHI(IntptrTy, 2);
    ThreadLongPhi->addIncoming(ThreadLong, ThreadLong->getParent());
    ThreadLongPhi->addIncoming(ReloadThreadLong, ReloadThreadLong->getParent());
    ThreadLong = ThreadLongPhi;
  }

  // Extract the address field from ThreadLong. Unnecessary on AArch64 with
  // TBI.
  Value *ThreadLongMaybeUntagged =
      TargetTriple.isAArch64() ? ThreadLong : untagPointer(IRB, ThreadLong);

  if (WithFrameRecord) {
    // Prepare ring buffer data.
    auto PC = IRB.CreatePtrToInt(F, IntptrTy);
    auto GetStackPointerFn =
        Intrinsic::getDeclaration(F->getParent(), Intrinsic::frameaddress);
    Value *SP = IRB.CreatePtrToInt(
        IRB.CreateCall(GetStackPointerFn,
                       {Constant::getNullValue(IRB.getInt32Ty())}),
        IntptrTy);
    // Mix SP and PC. TODO: also add the tag to the mix.
    // Assumptions:
    //   PC is 0x0000PPPPPPPPPPPP  (48 bits are meaningful, others are zero)
    //   SP is 0xsssssssssssSSSS0  (4 lower bits are zero)
    // We only really need ~20 lower non-zero bits (SSSS), so we mix like this:
    //   0xSSSSPPPPPPPPPPPP
    SP = IRB.CreateShl(SP, 44);

    // Store data to ring buffer.
    Value *RecordPtr =
        IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IntptrTy->getPointerTo(0));
    IRB.CreateStore(IRB.CreateOr(PC, SP), RecordPtr);

    // Update the ring buffer. Top byte of ThreadLong defines the size of the
    // buffer in pages, it must be a power of two, and the start of the buffer
    // must be aligned by twice that much. Therefore wrap around of the ring
    // buffer is simply Addr &= ~((ThreadLong >> 56) << 12).
    // The use of AShr instead of LShr is due to
    //   https://bugs.llvm.org/show_bug.cgi?id=39030
    // Runtime library makes sure not to use the highest bit.
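    // Illustrative example (not part of the original file): a top byte of 2
    // means a 2-page (0x2000-byte) buffer whose start is 0x4000-aligned. When
    // the record pointer advances from base + 0x1ff8 to base + 0x2000,
    // clearing bit 0x2000 with the mask below wraps it back to base.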
    Value *WrapMask = IRB.CreateXor(
        IRB.CreateShl(IRB.CreateAShr(ThreadLong, 56), 12, "", true, true),
        ConstantInt::get(IntptrTy, (uint64_t)-1));
    Value *ThreadLongNew = IRB.CreateAnd(
        IRB.CreateAdd(ThreadLong, ConstantInt::get(IntptrTy, 8)), WrapMask);
    IRB.CreateStore(ThreadLongNew, SlotPtr);
  }

  // Get shadow base address by aligning RecordPtr up.
  // Note: this is not correct if the pointer is already aligned.
  // Runtime library will make sure this never happens.
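  // Illustrative example (not part of the original file): with
  // kShadowBaseAlignment == 32, the value 0x5500001234 becomes
  // (0x5500001234 | 0xffffffff) + 1 == 0x5600000000. A value that is already
  // 4 GiB aligned would incorrectly land one full alignment higher, which is
  // why the runtime guarantees that case never reaches this code.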
  Value *ShadowBase = IRB.CreateAdd(
      IRB.CreateOr(
          ThreadLongMaybeUntagged,
          ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
      ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
  ShadowBase = IRB.CreateIntToPtr(ShadowBase, Int8PtrTy);
  return ShadowBase;
}

bool HWAddressSanitizer::instrumentStack(
    SmallVectorImpl<AllocaInst *> &Allocas,
    SmallVectorImpl<Instruction *> &RetVec, Value *StackTag) {
  // Ideally, we want to calculate a tagged stack base pointer, and rewrite all
  // alloca addresses using that. Unfortunately, offsets are not known yet
  // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
  // temp, shift-OR it into each alloca address and xor with the retag mask.
  // This generates one extra instruction per alloca use.
  for (unsigned N = 0; N < Allocas.size(); ++N) {
    auto *AI = Allocas[N];
    IRBuilder<> IRB(AI->getNextNode());

    // Replace uses of the alloca with tagged address.
    Value *Tag = getAllocaTag(IRB, StackTag, AI, N);
    Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
    Value *Replacement = tagPointer(IRB, AI->getType(), AILong, Tag);
    std::string Name =
        AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
    Replacement->setName(Name + ".hwasan");

    for (auto UI = AI->use_begin(), UE = AI->use_end(); UI != UE;) {
      Use &U = *UI++;
      if (U.getUser() != AILong)
        U.set(Replacement);
    }

    tagAlloca(IRB, AI, Tag);

    for (auto RI : RetVec) {
      IRB.SetInsertPoint(RI);

      // Re-tag alloca memory with the special UAR tag.
      Value *Tag = getUARTag(IRB, StackTag);
      tagAlloca(IRB, AI, Tag);
    }
  }

  return true;
}

bool HWAddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
  return (AI.getAllocatedType()->isSized() &&
          // FIXME: instrument dynamic allocas, too
          AI.isStaticAlloca() &&
          // alloca() may be called with 0 size, ignore it.
          getAllocaSizeInBytes(AI) > 0 &&
          // We are only interested in allocas not promotable to registers.
          // Promotable allocas are common under -O0.
          !isAllocaPromotable(&AI) &&
          // inalloca allocas are not treated as static, and we don't want
          // dynamic alloca instrumentation for them as well.
          !AI.isUsedWithInAlloca() &&
          // swifterror allocas are register promoted by ISel
          !AI.isSwiftError());
}

bool HWAddressSanitizer::runOnFunction(Function &F) {
  if (&F == HwasanCtorFunction)
    return false;

  if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
    return false;

  LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");

  SmallVector<Instruction*, 16> ToInstrument;
  SmallVector<AllocaInst*, 8> AllocasToInstrument;
  SmallVector<Instruction*, 8> RetVec;
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      if (ClInstrumentStack)
        if (AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
          // Realign all allocas. We don't want small uninteresting allocas to
          // hide in instrumented alloca's padding.
          if (AI->getAlignment() < Mapping.getAllocaAlignment())
            AI->setAlignment(Mapping.getAllocaAlignment());
          // Instrument some of them.
          if (isInterestingAlloca(*AI))
            AllocasToInstrument.push_back(AI);
          continue;
        }

      if (isa<ReturnInst>(Inst) || isa<ResumeInst>(Inst) ||
          isa<CleanupReturnInst>(Inst))
        RetVec.push_back(&Inst);

      Value *MaybeMask = nullptr;
      bool IsWrite;
      unsigned Alignment;
      uint64_t TypeSize;
      Value *Addr = isInterestingMemoryAccess(&Inst, &IsWrite, &TypeSize,
                                              &Alignment, &MaybeMask);
      if (Addr || isa<MemIntrinsic>(Inst))
        ToInstrument.push_back(&Inst);
    }
  }

  if (AllocasToInstrument.empty() && ToInstrument.empty())
    return false;

  if (ClCreateFrameDescriptions && !AllocasToInstrument.empty())
    createFrameGlobal(F, createFrameString(AllocasToInstrument));

  initializeCallbacks(*F.getParent());

  assert(!LocalDynamicShadow);

  Instruction *InsertPt = &*F.getEntryBlock().begin();
  IRBuilder<> EntryIRB(InsertPt);
  LocalDynamicShadow = emitPrologue(EntryIRB,
                                    /*WithFrameRecord*/ ClRecordStackHistory &&
                                        !AllocasToInstrument.empty());

  bool Changed = false;
  if (!AllocasToInstrument.empty()) {
    Value *StackTag =
        ClGenerateTagsWithCalls ? nullptr : getStackBaseTag(EntryIRB);
    Changed |= instrumentStack(AllocasToInstrument, RetVec, StackTag);
  }

  // If we split the entry block, move any allocas that were originally in the
  // entry block back into the entry block so that they aren't treated as
  // dynamic allocas.
  if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
    InsertPt = &*F.getEntryBlock().begin();
    for (auto II = EntryIRB.GetInsertBlock()->begin(),
              IE = EntryIRB.GetInsertBlock()->end();
         II != IE;) {
      Instruction *I = &*II++;
      if (auto *AI = dyn_cast<AllocaInst>(I))
        if (isa<ConstantInt>(AI->getArraySize()))
          I->moveBefore(InsertPt);
    }
  }

  for (auto Inst : ToInstrument)
    Changed |= instrumentMemAccess(Inst);

  LocalDynamicShadow = nullptr;

  return Changed;
}

void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple) {
  Scale = kDefaultShadowScale;
  if (ClMappingOffset.getNumOccurrences() > 0) {
    InGlobal = false;
    InTls = false;
    Offset = ClMappingOffset;
  } else if (ClEnableKhwasan || ClInstrumentWithCalls) {
    InGlobal = false;
    InTls = false;
    Offset = 0;
  } else if (ClWithIfunc) {
    InGlobal = true;
    InTls = false;
    Offset = kDynamicShadowSentinel;
  } else if (ClWithTls) {
    InGlobal = false;
    InTls = true;
    Offset = kDynamicShadowSentinel;
  } else {
    InGlobal = false;
    InTls = false;
    Offset = kDynamicShadowSentinel;
  }
}