//===-- X86AsmInstrumentation.cpp - Instrument X86 inline assembly --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "X86AsmInstrumentation.h"
#include "MCTargetDesc/X86MCTargetDesc.h"
#include "X86Operand.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCParser/MCTargetAsmParser.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SMLoc.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>
#include <memory>
#include <vector>
// The following comment describes how assembly instrumentation works.
// Currently only AddressSanitizer instrumentation is implemented, but we plan
// to support MemorySanitizer for inline assembly too. If you're not familiar
// with the AddressSanitizer algorithm, please read
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
//
// When inline assembly is parsed by an instance of X86AsmParser, all
// instructions are emitted via the EmitInstruction method. That is the
// place where X86AsmInstrumentation analyzes an instruction and
// decides whether the instruction should be emitted as-is or whether
// instrumentation is required. The latter happens when an
// instruction reads from or writes to memory. Currently the instruction
// opcode is checked explicitly, and if an instruction has a memory operand
// (for instance, movq (%rsi, %rcx, 8), %rax) it should be instrumented.
// There are also instructions that modify memory but don't have an explicit
// memory operand, for instance, movs.
//
// Let's first consider 8-byte memory accesses when an instruction has an
// explicit memory operand. In this case we need two registers:
// AddressReg to compute the address of the memory cells which are accessed
// and ShadowReg to compute the corresponding shadow address. So we need
// to spill both registers before the instrumentation code and restore them
// after instrumentation. Thus, in general, the instrumentation code looks
// like this:
//
// PUSHF                  # Store flags, otherwise they will be overwritten
// PUSH AddressReg        # spill AddressReg
// PUSH ShadowReg         # spill ShadowReg
// LEA MemOp, AddressReg  # compute address of the memory operand
// MOV AddressReg, ShadowReg
// SHR ShadowReg, 3
// # ShadowOffset(AddressReg >> 3) contains the address of the shadow cell
// # corresponding to MemOp.
// CMP ShadowOffset(ShadowReg), 0  # test shadow value
// JZ .Done               # when the shadow equals zero, everything is fine
// MOV AddressReg, RDI
// # Call the __asan_report function with AddressReg as an argument
// CALL __asan_report
// .Done:
// POP ShadowReg          # Restore ShadowReg
// POP AddressReg         # Restore AddressReg
// POPF                   # Restore flags
//
// Memory accesses of other sizes (1-, 2-, 4- and 16-byte) are handled in a
// similar manner, but small memory accesses (less than 8 bytes) require an
// additional ScratchReg, which is used for the shadow value.
//
// If, say, we're instrumenting an instruction like movs, only the contents of
// RDI, RDI + AccessSize * RCX, RSI and RSI + AccessSize * RCX are checked.
// In this case there is no need to spill and restore AddressReg, ShadowReg or
// the flags four times; they are saved on the stack just once, before
// instrumentation of these four addresses, and restored at the end of the
// instrumentation.
//
// Several things complicate this simple algorithm:
// * An instrumented memory operand can have RSP as a base or an index
//   register. So we need to add a constant offset before the computation
//   of the memory address, since flags, AddressReg, ShadowReg, etc. were
//   already stored on the stack and RSP was modified.
// * Debug info (usually DWARF) should be adjusted, because sometimes
//   RSP is used as a frame register. So we need to select some other
//   register as the frame register and temporarily override the current CFA
//   register.
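//
// As a concrete example of the mapping used below (kShadowOffset is
// 0x20000000 for 32-bit and 0x7fff8000 for 64-bit targets): an access to
// address A is checked against the shadow byte at (A >> 3) + kShadowOffset.
// For 8- and 16-byte accesses a non-zero shadow value triggers the report;
// for smaller accesses the shadow byte k additionally allows the access when
// (A & 7) + AccessSize - 1 < k (see InstrumentMemOperandSmall below).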
using namespace llvm;

static cl::opt<bool> ClAsanInstrumentAssembly(
    "asan-instrument-assembly",
    cl::desc("instrument assembly with AddressSanitizer checks"), cl::Hidden,
    cl::init(false));
static const int64_t MinAllowedDisplacement =
    std::numeric_limits<int32_t>::min();
static const int64_t MaxAllowedDisplacement =
    std::numeric_limits<int32_t>::max();

static int64_t ApplyDisplacementBounds(int64_t Displacement) {
  return std::max(std::min(MaxAllowedDisplacement, Displacement),
                  MinAllowedDisplacement);
}
static void CheckDisplacementBounds(int64_t Displacement) {
  assert(Displacement >= MinAllowedDisplacement &&
         Displacement <= MaxAllowedDisplacement);
}

static bool IsStackReg(unsigned Reg) {
  return Reg == X86::RSP || Reg == X86::ESP;
}

static bool IsSmallMemAccess(unsigned AccessSize) { return AccessSize < 8; }
namespace {

class X86AddressSanitizer : public X86AsmInstrumentation {
public:
  struct RegisterContext {
  private:
    enum RegOffset {
      REG_OFFSET_ADDRESS = 0,
      REG_OFFSET_SHADOW,
      REG_OFFSET_SCRATCH
    };

  public:
    RegisterContext(unsigned AddressReg, unsigned ShadowReg,
                    unsigned ScratchReg) {
      BusyRegs.push_back(convReg(AddressReg, 64));
      BusyRegs.push_back(convReg(ShadowReg, 64));
      BusyRegs.push_back(convReg(ScratchReg, 64));
    }
    unsigned AddressReg(unsigned Size) const {
      return convReg(BusyRegs[REG_OFFSET_ADDRESS], Size);
    }

    unsigned ShadowReg(unsigned Size) const {
      return convReg(BusyRegs[REG_OFFSET_SHADOW], Size);
    }

    unsigned ScratchReg(unsigned Size) const {
      return convReg(BusyRegs[REG_OFFSET_SCRATCH], Size);
    }
    void AddBusyReg(unsigned Reg) {
      if (Reg != X86::NoRegister)
        BusyRegs.push_back(convReg(Reg, 64));
    }

    void AddBusyRegs(const X86Operand &Op) {
      AddBusyReg(Op.getMemBaseReg());
      AddBusyReg(Op.getMemIndexReg());
    }

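    // Picks the first candidate general-purpose register that is not already
    // listed in BusyRegs, converted to the requested width; returns
    // X86::NoRegister when every candidate is busy.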
    unsigned ChooseFrameReg(unsigned Size) const {
      static const MCPhysReg Candidates[] = { X86::RBP, X86::RAX, X86::RBX,
                                              X86::RCX, X86::RDX, X86::RDI,
                                              X86::RSI };
      for (unsigned Reg : Candidates) {
        if (!std::count(BusyRegs.begin(), BusyRegs.end(), Reg))
          return convReg(Reg, Size);
      }
      return X86::NoRegister;
    }

  private:
    unsigned convReg(unsigned Reg, unsigned Size) const {
      return Reg == X86::NoRegister ? Reg : getX86SubSuperRegister(Reg, Size);
    }

    std::vector<unsigned> BusyRegs;
  };

  X86AddressSanitizer(const MCSubtargetInfo *&STI)
      : X86AsmInstrumentation(STI), RepPrefix(false), OrigSPOffset(0) {}

  ~X86AddressSanitizer() override = default;

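  // Note on REP handling: the parser delivers a REP prefix as a standalone
  // X86::REP_PREFIX pseudo-instruction. InstrumentAndEmitInstruction below
  // records it in RepPrefix and re-emits it immediately before the following
  // instruction, so the instrumentation sequence is never placed between the
  // prefix and the instruction it modifies.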
  // X86AsmInstrumentation implementation:
  void InstrumentAndEmitInstruction(const MCInst &Inst,
                                    OperandVector &Operands, MCContext &Ctx,
                                    const MCInstrInfo &MII,
                                    MCStreamer &Out) override {
    InstrumentMOVS(Inst, Operands, Ctx, MII, Out);
    if (RepPrefix)
      EmitInstruction(Out, MCInstBuilder(X86::REP_PREFIX));

    InstrumentMOV(Inst, Operands, Ctx, MII, Out);

    RepPrefix = (Inst.getOpcode() == X86::REP_PREFIX);
    if (!RepPrefix)
      EmitInstruction(Out, Inst);
  }

  // Adjusts the stack and saves all registers used by the instrumentation.
  virtual void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
                                            MCContext &Ctx,
                                            MCStreamer &Out) = 0;

  // Restores all registers used by the instrumentation and adjusts the stack.
  virtual void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
                                            MCContext &Ctx,
                                            MCStreamer &Out) = 0;

  virtual void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
                                         bool IsWrite,
                                         const RegisterContext &RegCtx,
                                         MCContext &Ctx,
                                         MCStreamer &Out) = 0;
  virtual void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
                                         bool IsWrite,
                                         const RegisterContext &RegCtx,
                                         MCContext &Ctx,
                                         MCStreamer &Out) = 0;

  virtual void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
                                  MCStreamer &Out) = 0;

  void InstrumentMemOperand(X86Operand &Op, unsigned AccessSize, bool IsWrite,
                            const RegisterContext &RegCtx, MCContext &Ctx,
                            MCStreamer &Out);
  void InstrumentMOVSBase(unsigned DstReg, unsigned SrcReg, unsigned CntReg,
                          unsigned AccessSize, MCContext &Ctx,
                          MCStreamer &Out);

  void InstrumentMOVS(const MCInst &Inst, OperandVector &Operands,
                      MCContext &Ctx, const MCInstrInfo &MII, MCStreamer &Out);
  void InstrumentMOV(const MCInst &Inst, OperandVector &Operands,
                     MCContext &Ctx, const MCInstrInfo &MII, MCStreamer &Out);

protected:
  void EmitLabel(MCStreamer &Out, MCSymbol *Label) { Out.EmitLabel(Label); }

  void EmitLEA(X86Operand &Op, unsigned Size, unsigned Reg, MCStreamer &Out) {
    assert(Size == 32 || Size == 64);
    MCInst Inst;
    Inst.setOpcode(Size == 32 ? X86::LEA32r : X86::LEA64r);
    Inst.addOperand(MCOperand::createReg(getX86SubSuperRegister(Reg, Size)));
    Op.addMemOperands(Inst, 5);
    EmitInstruction(Out, Inst);
  }

  void ComputeMemOperandAddress(X86Operand &Op, unsigned Size, unsigned Reg,
                                MCContext &Ctx, MCStreamer &Out);

  // Creates a new memory operand with Displacement added to the original
  // displacement. Residue receives the leftover amount when the combined
  // displacement exceeds the 32-bit limit.
  std::unique_ptr<X86Operand> AddDisplacement(X86Operand &Op,
                                              int64_t Displacement,
                                              MCContext &Ctx, int64_t *Residue);

  bool is64BitMode() const {
    return STI->getFeatureBits()[X86::Mode64Bit];
  }

  bool is32BitMode() const {
    return STI->getFeatureBits()[X86::Mode32Bit];
  }

  bool is16BitMode() const {
    return STI->getFeatureBits()[X86::Mode16Bit];
  }

  unsigned getPointerWidth() {
    if (is16BitMode()) return 16;
    if (is32BitMode()) return 32;
    if (is64BitMode()) return 64;
    llvm_unreachable("invalid mode");
  }

  // True when the previous instruction was actually a REP prefix.
  bool RepPrefix;

  // Offset from the original SP register.
  int64_t OrigSPOffset;
};

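// Dispatches a single memory-operand check: accesses of 8 or 16 bytes are
// handled by InstrumentMemOperandLarge (the whole shadow byte/word must be
// zero), smaller accesses by InstrumentMemOperandSmall (which also needs a
// scratch register for the partial-granule comparison).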
void X86AddressSanitizer::InstrumentMemOperand(
    X86Operand &Op, unsigned AccessSize, bool IsWrite,
    const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
  assert(Op.isMem() && "Op should be a memory operand.");
  assert((AccessSize & (AccessSize - 1)) == 0 && AccessSize <= 16 &&
         "AccessSize should be a power of two, less or equal than 16.");
  // FIXME: take into account load/store alignment.
  if (IsSmallMemAccess(AccessSize))
    InstrumentMemOperandSmall(Op, AccessSize, IsWrite, RegCtx, Ctx, Out);
  else
    InstrumentMemOperandLarge(Op, AccessSize, IsWrite, RegCtx, Ctx, Out);
}

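// Emits the checks for a movs-style instruction: only the first and the last
// element of the source and destination ranges are verified, sharing a single
// spill/restore sequence (see the FIXME below about checking whole ranges).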
void X86AddressSanitizer::InstrumentMOVSBase(unsigned DstReg, unsigned SrcReg,
                                             unsigned CntReg,
                                             unsigned AccessSize,
                                             MCContext &Ctx, MCStreamer &Out) {
  // FIXME: check whole ranges [DstReg .. DstReg + AccessSize * (CntReg - 1)]
  // and [SrcReg .. SrcReg + AccessSize * (CntReg - 1)].
  RegisterContext RegCtx(X86::RDX /* AddressReg */, X86::RAX /* ShadowReg */,
                         IsSmallMemAccess(AccessSize)
                             ? X86::RBX
                             : X86::NoRegister /* ScratchReg */);
  RegCtx.AddBusyReg(DstReg);
  RegCtx.AddBusyReg(SrcReg);
  RegCtx.AddBusyReg(CntReg);

  InstrumentMemOperandPrologue(RegCtx, Ctx, Out);

  // Test (%SrcReg)
  {
    const MCExpr *Disp = MCConstantExpr::create(0, Ctx);
    std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
        getPointerWidth(), 0, Disp, SrcReg, 0, AccessSize, SMLoc(), SMLoc()));
    InstrumentMemOperand(*Op, AccessSize, false /* IsWrite */, RegCtx, Ctx,
                         Out);
  }

  // Test -1(%SrcReg, %CntReg, AccessSize)
  {
    const MCExpr *Disp = MCConstantExpr::create(-1, Ctx);
    std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
        getPointerWidth(), 0, Disp, SrcReg, CntReg, AccessSize, SMLoc(),
        SMLoc()));
    InstrumentMemOperand(*Op, AccessSize, false /* IsWrite */, RegCtx, Ctx,
                         Out);
  }

  // Test (%DstReg)
  {
    const MCExpr *Disp = MCConstantExpr::create(0, Ctx);
    std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
        getPointerWidth(), 0, Disp, DstReg, 0, AccessSize, SMLoc(), SMLoc()));
    InstrumentMemOperand(*Op, AccessSize, true /* IsWrite */, RegCtx, Ctx, Out);
  }

  // Test -1(%DstReg, %CntReg, AccessSize)
  {
    const MCExpr *Disp = MCConstantExpr::create(-1, Ctx);
    std::unique_ptr<X86Operand> Op(X86Operand::CreateMem(
        getPointerWidth(), 0, Disp, DstReg, CntReg, AccessSize, SMLoc(),
        SMLoc()));
    InstrumentMemOperand(*Op, AccessSize, true /* IsWrite */, RegCtx, Ctx, Out);
  }

  InstrumentMemOperandEpilogue(RegCtx, Ctx, Out);
}

void X86AddressSanitizer::InstrumentMOVS(const MCInst &Inst,
                                         OperandVector &Operands,
                                         MCContext &Ctx, const MCInstrInfo &MII,
                                         MCStreamer &Out) {
  // Access size in bytes.
  unsigned AccessSize = 0;

  switch (Inst.getOpcode()) {
  case X86::MOVSB:
    AccessSize = 1;
    break;
  case X86::MOVSW:
    AccessSize = 2;
    break;
  case X86::MOVSL:
    AccessSize = 4;
    break;
  case X86::MOVSQ:
    AccessSize = 8;
    break;
  default:
    return;
  }

  InstrumentMOVSImpl(AccessSize, Ctx, Out);
}

void X86AddressSanitizer::InstrumentMOV(const MCInst &Inst,
                                        OperandVector &Operands,
                                        MCContext &Ctx, const MCInstrInfo &MII,
                                        MCStreamer &Out) {
  // Access size in bytes.
  unsigned AccessSize = 0;

  switch (Inst.getOpcode()) {
  case X86::MOV8mi:
  case X86::MOV8mr:
  case X86::MOV8rm:
    AccessSize = 1;
    break;
  case X86::MOV16mi:
  case X86::MOV16mr:
  case X86::MOV16rm:
    AccessSize = 2;
    break;
  case X86::MOV32mi:
  case X86::MOV32mr:
  case X86::MOV32rm:
    AccessSize = 4;
    break;
  case X86::MOV64mi32:
  case X86::MOV64mr:
  case X86::MOV64rm:
    AccessSize = 8;
    break;
  case X86::MOVAPDmr:
  case X86::MOVAPSmr:
  case X86::MOVAPDrm:
  case X86::MOVAPSrm:
    AccessSize = 16;
    break;
  default:
    return;
  }

  const bool IsWrite = MII.get(Inst.getOpcode()).mayStore();

  for (unsigned Ix = 0; Ix < Operands.size(); ++Ix) {
    assert(Operands[Ix]);
    MCParsedAsmOperand &Op = *Operands[Ix];
    if (Op.isMem()) {
      X86Operand &MemOp = static_cast<X86Operand &>(Op);
      RegisterContext RegCtx(
          X86::RDI /* AddressReg */, X86::RAX /* ShadowReg */,
          IsSmallMemAccess(AccessSize) ? X86::RCX
                                       : X86::NoRegister /* ScratchReg */);
      RegCtx.AddBusyRegs(MemOp);
      InstrumentMemOperandPrologue(RegCtx, Ctx, Out);
      InstrumentMemOperand(MemOp, AccessSize, IsWrite, RegCtx, Ctx, Out);
      InstrumentMemOperandEpilogue(RegCtx, Ctx, Out);
    }
  }
}

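// Materializes the address of Op into Reg with LEA. When the operand is
// RSP/ESP-relative, the displacement is first corrected by OrigSPOffset to
// compensate for the spills performed above; displacements that no longer fit
// into 32 bits are folded in with additional LEAs over the residue.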
void X86AddressSanitizer::ComputeMemOperandAddress(X86Operand &Op,
                                                   unsigned Size, unsigned Reg,
                                                   MCContext &Ctx,
                                                   MCStreamer &Out) {
  int64_t Displacement = 0;
  if (IsStackReg(Op.getMemBaseReg()))
    Displacement -= OrigSPOffset;
  if (IsStackReg(Op.getMemIndexReg()))
    Displacement -= OrigSPOffset * Op.getMemScale();

  assert(Displacement >= 0);

  if (Displacement == 0) {
    EmitLEA(Op, Size, Reg, Out);
    return;
  }

  int64_t Residue;
  std::unique_ptr<X86Operand> NewOp =
      AddDisplacement(Op, Displacement, Ctx, &Residue);
  EmitLEA(*NewOp, Size, Reg, Out);

  while (Residue != 0) {
    const MCConstantExpr *Disp =
        MCConstantExpr::create(ApplyDisplacementBounds(Residue), Ctx);
    std::unique_ptr<X86Operand> DispOp =
        X86Operand::CreateMem(getPointerWidth(), 0, Disp, Reg, 0, 1, SMLoc(),
                              SMLoc());
    EmitLEA(*DispOp, Size, Reg, Out);
    Residue -= Disp->getValue();
  }
}

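// Clamps the combined displacement into the signed 32-bit range that a memory
// operand can encode; whatever does not fit is returned through *Residue so
// the caller can apply it with follow-up LEAs.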
std::unique_ptr<X86Operand>
X86AddressSanitizer::AddDisplacement(X86Operand &Op, int64_t Displacement,
                                     MCContext &Ctx, int64_t *Residue) {
  assert(Displacement >= 0);

  if (Displacement == 0 ||
      (Op.getMemDisp() && Op.getMemDisp()->getKind() != MCExpr::Constant)) {
    *Residue = Displacement;
    return X86Operand::CreateMem(Op.getMemModeSize(), Op.getMemSegReg(),
                                 Op.getMemDisp(), Op.getMemBaseReg(),
                                 Op.getMemIndexReg(), Op.getMemScale(),
                                 SMLoc(), SMLoc());
  }

  int64_t OrigDisplacement =
      static_cast<const MCConstantExpr *>(Op.getMemDisp())->getValue();
  CheckDisplacementBounds(OrigDisplacement);
  Displacement += OrigDisplacement;

  int64_t NewDisplacement = ApplyDisplacementBounds(Displacement);
  CheckDisplacementBounds(NewDisplacement);

  *Residue = Displacement - NewDisplacement;
  const MCExpr *Disp = MCConstantExpr::create(NewDisplacement, Ctx);
  return X86Operand::CreateMem(Op.getMemModeSize(), Op.getMemSegReg(), Disp,
                               Op.getMemBaseReg(), Op.getMemIndexReg(),
                               Op.getMemScale(), SMLoc(), SMLoc());
}

class X86AddressSanitizer32 : public X86AddressSanitizer {
public:
  static const long kShadowOffset = 0x20000000;

  X86AddressSanitizer32(const MCSubtargetInfo *&STI)
      : X86AddressSanitizer(STI) {}

  ~X86AddressSanitizer32() override = default;

  unsigned GetFrameReg(const MCContext &Ctx, MCStreamer &Out) {
    unsigned FrameReg = GetFrameRegGeneric(Ctx, Out);
    if (FrameReg == X86::NoRegister)
      return FrameReg;
    return getX86SubSuperRegister(FrameReg, 32);
  }

  void SpillReg(MCStreamer &Out, unsigned Reg) {
    EmitInstruction(Out, MCInstBuilder(X86::PUSH32r).addReg(Reg));
    OrigSPOffset -= 4;
  }

  void RestoreReg(MCStreamer &Out, unsigned Reg) {
    EmitInstruction(Out, MCInstBuilder(X86::POP32r).addReg(Reg));
    OrigSPOffset += 4;
  }

  void StoreFlags(MCStreamer &Out) {
    EmitInstruction(Out, MCInstBuilder(X86::PUSHF32));
    OrigSPOffset -= 4;
  }

  void RestoreFlags(MCStreamer &Out) {
    EmitInstruction(Out, MCInstBuilder(X86::POPF32));
    OrigSPOffset += 4;
  }

  void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
                                    MCContext &Ctx,
                                    MCStreamer &Out) override {
    unsigned LocalFrameReg = RegCtx.ChooseFrameReg(32);
    assert(LocalFrameReg != X86::NoRegister);

    const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
    unsigned FrameReg = GetFrameReg(Ctx, Out);
    if (MRI && FrameReg != X86::NoRegister) {
      SpillReg(Out, LocalFrameReg);
      if (FrameReg == X86::ESP) {
        Out.EmitCFIAdjustCfaOffset(4 /* byte size of the LocalFrameReg */);
        Out.EmitCFIRelOffset(
            MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */), 0);
      }
      EmitInstruction(
          Out,
          MCInstBuilder(X86::MOV32rr).addReg(LocalFrameReg).addReg(FrameReg));
      Out.EmitCFIRememberState();
      Out.EmitCFIDefCfaRegister(
          MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */));
    }

    SpillReg(Out, RegCtx.AddressReg(32));
    SpillReg(Out, RegCtx.ShadowReg(32));
    if (RegCtx.ScratchReg(32) != X86::NoRegister)
      SpillReg(Out, RegCtx.ScratchReg(32));
    StoreFlags(Out);
  }

  void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
                                    MCContext &Ctx,
                                    MCStreamer &Out) override {
    unsigned LocalFrameReg = RegCtx.ChooseFrameReg(32);
    assert(LocalFrameReg != X86::NoRegister);

    RestoreFlags(Out);
    if (RegCtx.ScratchReg(32) != X86::NoRegister)
      RestoreReg(Out, RegCtx.ScratchReg(32));
    RestoreReg(Out, RegCtx.ShadowReg(32));
    RestoreReg(Out, RegCtx.AddressReg(32));

    unsigned FrameReg = GetFrameReg(Ctx, Out);
    if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) {
      RestoreReg(Out, LocalFrameReg);
      Out.EmitCFIRestoreState();
      if (FrameReg == X86::ESP)
        Out.EmitCFIAdjustCfaOffset(-4 /* byte size of the LocalFrameReg */);
    }
  }

  void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
                                 bool IsWrite,
                                 const RegisterContext &RegCtx,
                                 MCContext &Ctx,
                                 MCStreamer &Out) override;
  void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
                                 bool IsWrite,
                                 const RegisterContext &RegCtx,
                                 MCContext &Ctx,
                                 MCStreamer &Out) override;
  void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
                          MCStreamer &Out) override;

private:
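  // Emits the out-of-line failure path: realigns the stack to 16 bytes,
  // passes the faulting address (pushed on the stack in the 32-bit ABI) and
  // calls __asan_report_{load,store}{1,2,4,8,16} through the PLT.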
  void EmitCallAsanReport(unsigned AccessSize, bool IsWrite, MCContext &Ctx,
                          MCStreamer &Out, const RegisterContext &RegCtx) {
    EmitInstruction(Out, MCInstBuilder(X86::CLD));
    EmitInstruction(Out, MCInstBuilder(X86::MMX_EMMS));

    EmitInstruction(Out, MCInstBuilder(X86::AND32ri8)
                             .addReg(X86::ESP)
                             .addReg(X86::ESP)
                             .addImm(-16));
    EmitInstruction(
        Out, MCInstBuilder(X86::PUSH32r).addReg(RegCtx.AddressReg(32)));

    MCSymbol *FnSym = Ctx.getOrCreateSymbol(Twine("__asan_report_") +
                                            (IsWrite ? "store" : "load") +
                                            Twine(AccessSize));
    const MCSymbolRefExpr *FnExpr =
        MCSymbolRefExpr::create(FnSym, MCSymbolRefExpr::VK_PLT, Ctx);
    EmitInstruction(Out, MCInstBuilder(X86::CALLpcrel32).addExpr(FnExpr));
  }
};

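// For accesses smaller than 8 bytes a non-zero shadow byte k means only the
// first k bytes of the 8-byte granule are addressable, so the check below
// compares (Address & 7) + AccessSize - 1 with the sign-extended shadow value
// and reports only when the result is not smaller.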
void X86AddressSanitizer32::InstrumentMemOperandSmall(
    X86Operand &Op, unsigned AccessSize, bool IsWrite,
    const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
  unsigned AddressRegI32 = RegCtx.AddressReg(32);
  unsigned ShadowRegI32 = RegCtx.ShadowReg(32);
  unsigned ShadowRegI8 = RegCtx.ShadowReg(8);

  assert(RegCtx.ScratchReg(32) != X86::NoRegister);
  unsigned ScratchRegI32 = RegCtx.ScratchReg(32);

  ComputeMemOperandAddress(Op, 32, AddressRegI32, Ctx, Out);

  EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg(
                           AddressRegI32));
  EmitInstruction(Out, MCInstBuilder(X86::SHR32ri)
                           .addReg(ShadowRegI32)
                           .addReg(ShadowRegI32)
                           .addImm(3));

  {
    MCInst Inst;
    Inst.setOpcode(X86::MOV8rm);
    Inst.addOperand(MCOperand::createReg(ShadowRegI8));
    const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
    std::unique_ptr<X86Operand> Op(
        X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI32, 0, 1,
                              SMLoc(), SMLoc()));
    Op->addMemOperands(Inst, 5);
    EmitInstruction(Out, Inst);
  }

  EmitInstruction(
      Out, MCInstBuilder(X86::TEST8rr).addReg(ShadowRegI8).addReg(ShadowRegI8));
  MCSymbol *DoneSym = Ctx.createTempSymbol();
  const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

  EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ScratchRegI32).addReg(
                           AddressRegI32));
  EmitInstruction(Out, MCInstBuilder(X86::AND32ri)
                           .addReg(ScratchRegI32)
                           .addReg(ScratchRegI32)
                           .addImm(7));

  switch (AccessSize) {
  default: llvm_unreachable("Incorrect access size");
  case 1:
    break;
  case 2: {
    const MCExpr *Disp = MCConstantExpr::create(1, Ctx);
    std::unique_ptr<X86Operand> Op(
        X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1,
                              SMLoc(), SMLoc()));
    EmitLEA(*Op, 32, ScratchRegI32, Out);
    break;
  }
  case 4:
    EmitInstruction(Out, MCInstBuilder(X86::ADD32ri8)
                             .addReg(ScratchRegI32)
                             .addReg(ScratchRegI32)
                             .addImm(3));
    break;
  }

  EmitInstruction(
      Out,
      MCInstBuilder(X86::MOVSX32rr8).addReg(ShadowRegI32).addReg(ShadowRegI8));
  EmitInstruction(Out, MCInstBuilder(X86::CMP32rr).addReg(ScratchRegI32).addReg(
                           ShadowRegI32));
  EmitInstruction(Out, MCInstBuilder(X86::JL_1).addExpr(DoneExpr));

  EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
  EmitLabel(Out, DoneSym);
}

void X86AddressSanitizer32::InstrumentMemOperandLarge(
    X86Operand &Op, unsigned AccessSize, bool IsWrite,
    const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
  unsigned AddressRegI32 = RegCtx.AddressReg(32);
  unsigned ShadowRegI32 = RegCtx.ShadowReg(32);

  ComputeMemOperandAddress(Op, 32, AddressRegI32, Ctx, Out);

  EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ShadowRegI32).addReg(
                           AddressRegI32));
  EmitInstruction(Out, MCInstBuilder(X86::SHR32ri)
                           .addReg(ShadowRegI32)
                           .addReg(ShadowRegI32)
                           .addImm(3));
  {
    MCInst Inst;
    switch (AccessSize) {
    default: llvm_unreachable("Incorrect access size");
    case 8:
      Inst.setOpcode(X86::CMP8mi);
      break;
    case 16:
      Inst.setOpcode(X86::CMP16mi);
      break;
    }
    const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
    std::unique_ptr<X86Operand> Op(
        X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI32, 0, 1,
                              SMLoc(), SMLoc()));
    Op->addMemOperands(Inst, 5);
    Inst.addOperand(MCOperand::createImm(0));
    EmitInstruction(Out, Inst);
  }

  MCSymbol *DoneSym = Ctx.createTempSymbol();
  const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

  EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
  EmitLabel(Out, DoneSym);
}

void X86AddressSanitizer32::InstrumentMOVSImpl(unsigned AccessSize,
                                               MCContext &Ctx,
                                               MCStreamer &Out) {
  StoreFlags(Out);

  // No need to test when ECX equals zero.
  MCSymbol *DoneSym = Ctx.createTempSymbol();
  const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
  EmitInstruction(
      Out, MCInstBuilder(X86::TEST32rr).addReg(X86::ECX).addReg(X86::ECX));
  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

  // Instrument first and last elements in src and dst range.
  InstrumentMOVSBase(X86::EDI /* DstReg */, X86::ESI /* SrcReg */,
                     X86::ECX /* CntReg */, AccessSize, Ctx, Out);

  EmitLabel(Out, DoneSym);
  RestoreFlags(Out);
}

class X86AddressSanitizer64 : public X86AddressSanitizer {
public:
  static const long kShadowOffset = 0x7fff8000;

  X86AddressSanitizer64(const MCSubtargetInfo *&STI)
      : X86AddressSanitizer(STI) {}

  ~X86AddressSanitizer64() override = default;

  unsigned GetFrameReg(const MCContext &Ctx, MCStreamer &Out) {
    unsigned FrameReg = GetFrameRegGeneric(Ctx, Out);
    if (FrameReg == X86::NoRegister)
      return FrameReg;
    return getX86SubSuperRegister(FrameReg, 64);
  }

  void SpillReg(MCStreamer &Out, unsigned Reg) {
    EmitInstruction(Out, MCInstBuilder(X86::PUSH64r).addReg(Reg));
    OrigSPOffset -= 8;
  }

  void RestoreReg(MCStreamer &Out, unsigned Reg) {
    EmitInstruction(Out, MCInstBuilder(X86::POP64r).addReg(Reg));
    OrigSPOffset += 8;
  }

  void StoreFlags(MCStreamer &Out) {
    EmitInstruction(Out, MCInstBuilder(X86::PUSHF64));
    OrigSPOffset -= 8;
  }

  void RestoreFlags(MCStreamer &Out) {
    EmitInstruction(Out, MCInstBuilder(X86::POPF64));
    OrigSPOffset += 8;
  }

  void InstrumentMemOperandPrologue(const RegisterContext &RegCtx,
                                    MCContext &Ctx,
                                    MCStreamer &Out) override {
    unsigned LocalFrameReg = RegCtx.ChooseFrameReg(64);
    assert(LocalFrameReg != X86::NoRegister);

    const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
    unsigned FrameReg = GetFrameReg(Ctx, Out);
    if (MRI && FrameReg != X86::NoRegister) {
      SpillReg(Out, X86::RBP);
      if (FrameReg == X86::RSP) {
        Out.EmitCFIAdjustCfaOffset(8 /* byte size of the LocalFrameReg */);
        Out.EmitCFIRelOffset(
            MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */), 0);
      }
      EmitInstruction(
          Out,
          MCInstBuilder(X86::MOV64rr).addReg(LocalFrameReg).addReg(FrameReg));
      Out.EmitCFIRememberState();
      Out.EmitCFIDefCfaRegister(
          MRI->getDwarfRegNum(LocalFrameReg, true /* IsEH */));
    }

    EmitAdjustRSP(Ctx, Out, -128);
    SpillReg(Out, RegCtx.ShadowReg(64));
    SpillReg(Out, RegCtx.AddressReg(64));
    if (RegCtx.ScratchReg(64) != X86::NoRegister)
      SpillReg(Out, RegCtx.ScratchReg(64));
    StoreFlags(Out);
  }

  void InstrumentMemOperandEpilogue(const RegisterContext &RegCtx,
                                    MCContext &Ctx,
                                    MCStreamer &Out) override {
    unsigned LocalFrameReg = RegCtx.ChooseFrameReg(64);
    assert(LocalFrameReg != X86::NoRegister);

    RestoreFlags(Out);
    if (RegCtx.ScratchReg(64) != X86::NoRegister)
      RestoreReg(Out, RegCtx.ScratchReg(64));
    RestoreReg(Out, RegCtx.AddressReg(64));
    RestoreReg(Out, RegCtx.ShadowReg(64));
    EmitAdjustRSP(Ctx, Out, 128);

    unsigned FrameReg = GetFrameReg(Ctx, Out);
    if (Ctx.getRegisterInfo() && FrameReg != X86::NoRegister) {
      RestoreReg(Out, LocalFrameReg);
      Out.EmitCFIRestoreState();
      if (FrameReg == X86::RSP)
        Out.EmitCFIAdjustCfaOffset(-8 /* byte size of the LocalFrameReg */);
    }
  }

  void InstrumentMemOperandSmall(X86Operand &Op, unsigned AccessSize,
                                 bool IsWrite,
                                 const RegisterContext &RegCtx,
                                 MCContext &Ctx,
                                 MCStreamer &Out) override;
  void InstrumentMemOperandLarge(X86Operand &Op, unsigned AccessSize,
                                 bool IsWrite,
                                 const RegisterContext &RegCtx,
                                 MCContext &Ctx,
                                 MCStreamer &Out) override;
  void InstrumentMOVSImpl(unsigned AccessSize, MCContext &Ctx,
                          MCStreamer &Out) override;

private:
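  // Moves RSP with LEA (which leaves EFLAGS untouched) and tracks the change
  // in OrigSPOffset; the -128/+128 adjustments in the prologue and epilogue
  // step over the x86-64 red zone before anything is pushed.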
  void EmitAdjustRSP(MCContext &Ctx, MCStreamer &Out, long Offset) {
    const MCExpr *Disp = MCConstantExpr::create(Offset, Ctx);
    std::unique_ptr<X86Operand> Op(
        X86Operand::CreateMem(getPointerWidth(), 0, Disp, X86::RSP, 0, 1,
                              SMLoc(), SMLoc()));
    EmitLEA(*Op, 64, X86::RSP, Out);
    OrigSPOffset += Offset;
  }

  void EmitCallAsanReport(unsigned AccessSize, bool IsWrite, MCContext &Ctx,
                          MCStreamer &Out, const RegisterContext &RegCtx) {
    EmitInstruction(Out, MCInstBuilder(X86::CLD));
    EmitInstruction(Out, MCInstBuilder(X86::MMX_EMMS));

    EmitInstruction(Out, MCInstBuilder(X86::AND64ri8)
                             .addReg(X86::RSP)
                             .addReg(X86::RSP)
                             .addImm(-16));

    if (RegCtx.AddressReg(64) != X86::RDI) {
      EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(X86::RDI).addReg(
                               RegCtx.AddressReg(64)));
    }
    MCSymbol *FnSym = Ctx.getOrCreateSymbol(Twine("__asan_report_") +
                                            (IsWrite ? "store" : "load") +
                                            Twine(AccessSize));
    const MCSymbolRefExpr *FnExpr =
        MCSymbolRefExpr::create(FnSym, MCSymbolRefExpr::VK_PLT, Ctx);
    EmitInstruction(Out, MCInstBuilder(X86::CALL64pcrel32).addExpr(FnExpr));
  }
};

} // end anonymous namespace

void X86AddressSanitizer64::InstrumentMemOperandSmall(
    X86Operand &Op, unsigned AccessSize, bool IsWrite,
    const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
  unsigned AddressRegI64 = RegCtx.AddressReg(64);
  unsigned AddressRegI32 = RegCtx.AddressReg(32);
  unsigned ShadowRegI64 = RegCtx.ShadowReg(64);
  unsigned ShadowRegI32 = RegCtx.ShadowReg(32);
  unsigned ShadowRegI8 = RegCtx.ShadowReg(8);

  assert(RegCtx.ScratchReg(32) != X86::NoRegister);
  unsigned ScratchRegI32 = RegCtx.ScratchReg(32);

  ComputeMemOperandAddress(Op, 64, AddressRegI64, Ctx, Out);

  EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg(
                           AddressRegI64));
  EmitInstruction(Out, MCInstBuilder(X86::SHR64ri)
                           .addReg(ShadowRegI64)
                           .addReg(ShadowRegI64)
                           .addImm(3));
  {
    MCInst Inst;
    Inst.setOpcode(X86::MOV8rm);
    Inst.addOperand(MCOperand::createReg(ShadowRegI8));
    const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
    std::unique_ptr<X86Operand> Op(
        X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI64, 0, 1,
                              SMLoc(), SMLoc()));
    Op->addMemOperands(Inst, 5);
    EmitInstruction(Out, Inst);
  }

  EmitInstruction(
      Out, MCInstBuilder(X86::TEST8rr).addReg(ShadowRegI8).addReg(ShadowRegI8));
  MCSymbol *DoneSym = Ctx.createTempSymbol();
  const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

  EmitInstruction(Out, MCInstBuilder(X86::MOV32rr).addReg(ScratchRegI32).addReg(
                           AddressRegI32));
  EmitInstruction(Out, MCInstBuilder(X86::AND32ri)
                           .addReg(ScratchRegI32)
                           .addReg(ScratchRegI32)
                           .addImm(7));

  switch (AccessSize) {
  default: llvm_unreachable("Incorrect access size");
  case 1:
    break;
  case 2: {
    const MCExpr *Disp = MCConstantExpr::create(1, Ctx);
    std::unique_ptr<X86Operand> Op(
        X86Operand::CreateMem(getPointerWidth(), 0, Disp, ScratchRegI32, 0, 1,
                              SMLoc(), SMLoc()));
    EmitLEA(*Op, 32, ScratchRegI32, Out);
    break;
  }
  case 4:
    EmitInstruction(Out, MCInstBuilder(X86::ADD32ri8)
                             .addReg(ScratchRegI32)
                             .addReg(ScratchRegI32)
                             .addImm(3));
    break;
  }

  EmitInstruction(
      Out,
      MCInstBuilder(X86::MOVSX32rr8).addReg(ShadowRegI32).addReg(ShadowRegI8));
  EmitInstruction(Out, MCInstBuilder(X86::CMP32rr).addReg(ScratchRegI32).addReg(
                           ShadowRegI32));
  EmitInstruction(Out, MCInstBuilder(X86::JL_1).addExpr(DoneExpr));

  EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
  EmitLabel(Out, DoneSym);
}

void X86AddressSanitizer64::InstrumentMemOperandLarge(
    X86Operand &Op, unsigned AccessSize, bool IsWrite,
    const RegisterContext &RegCtx, MCContext &Ctx, MCStreamer &Out) {
  unsigned AddressRegI64 = RegCtx.AddressReg(64);
  unsigned ShadowRegI64 = RegCtx.ShadowReg(64);

  ComputeMemOperandAddress(Op, 64, AddressRegI64, Ctx, Out);

  EmitInstruction(Out, MCInstBuilder(X86::MOV64rr).addReg(ShadowRegI64).addReg(
                           AddressRegI64));
  EmitInstruction(Out, MCInstBuilder(X86::SHR64ri)
                           .addReg(ShadowRegI64)
                           .addReg(ShadowRegI64)
                           .addImm(3));
  {
    MCInst Inst;
    switch (AccessSize) {
    default: llvm_unreachable("Incorrect access size");
    case 8:
      Inst.setOpcode(X86::CMP8mi);
      break;
    case 16:
      Inst.setOpcode(X86::CMP16mi);
      break;
    }
    const MCExpr *Disp = MCConstantExpr::create(kShadowOffset, Ctx);
    std::unique_ptr<X86Operand> Op(
        X86Operand::CreateMem(getPointerWidth(), 0, Disp, ShadowRegI64, 0, 1,
                              SMLoc(), SMLoc()));
    Op->addMemOperands(Inst, 5);
    Inst.addOperand(MCOperand::createImm(0));
    EmitInstruction(Out, Inst);
  }

  MCSymbol *DoneSym = Ctx.createTempSymbol();
  const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

  EmitCallAsanReport(AccessSize, IsWrite, Ctx, Out, RegCtx);
  EmitLabel(Out, DoneSym);
}

void X86AddressSanitizer64::InstrumentMOVSImpl(unsigned AccessSize,
                                               MCContext &Ctx,
                                               MCStreamer &Out) {
  StoreFlags(Out);

  // No need to test when RCX equals zero.
  MCSymbol *DoneSym = Ctx.createTempSymbol();
  const MCExpr *DoneExpr = MCSymbolRefExpr::create(DoneSym, Ctx);
  EmitInstruction(
      Out, MCInstBuilder(X86::TEST64rr).addReg(X86::RCX).addReg(X86::RCX));
  EmitInstruction(Out, MCInstBuilder(X86::JE_1).addExpr(DoneExpr));

  // Instrument first and last elements in src and dst range.
  InstrumentMOVSBase(X86::RDI /* DstReg */, X86::RSI /* SrcReg */,
                     X86::RCX /* CntReg */, AccessSize, Ctx, Out);

  EmitLabel(Out, DoneSym);
  RestoreFlags(Out);
}

X86AsmInstrumentation::X86AsmInstrumentation(const MCSubtargetInfo *&STI)
    : STI(STI) {}

X86AsmInstrumentation::~X86AsmInstrumentation() = default;

void X86AsmInstrumentation::InstrumentAndEmitInstruction(
    const MCInst &Inst, OperandVector &Operands, MCContext &Ctx,
    const MCInstrInfo &MII, MCStreamer &Out) {
  EmitInstruction(Out, Inst);
}

void X86AsmInstrumentation::EmitInstruction(MCStreamer &Out,
                                            const MCInst &Inst) {
  Out.EmitInstruction(Inst, *STI);
}

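// Returns the register that the active DWARF frame currently uses as its CFA
// register (or InitialFrameReg when the caller set one explicitly while
// instrumenting a MachineFunction); X86::NoRegister means there is no usable
// frame.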
unsigned X86AsmInstrumentation::GetFrameRegGeneric(const MCContext &Ctx,
                                                   MCStreamer &Out) {
  if (!Out.getNumFrameInfos()) // No active dwarf frame
    return X86::NoRegister;
  const MCDwarfFrameInfo &Frame = Out.getDwarfFrameInfos().back();
  if (Frame.End) // Active dwarf frame is closed
    return X86::NoRegister;
  const MCRegisterInfo *MRI = Ctx.getRegisterInfo();
  if (!MRI) // No register info
    return X86::NoRegister;

  if (InitialFrameReg) {
    // FrameReg is set explicitly, we're instrumenting a MachineFunction.
    return InitialFrameReg;
  }

  return MRI->getLLVMRegNum(Frame.CurrentCfaRegister, true /* IsEH */);
}

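// Factory used by the asm parser: ASan instrumentation is only created when
// -asan-instrument-assembly is set, the target OS is Linux (where the
// compiler-rt runtime is available) and address sanitizing is enabled in the
// MC options; otherwise a pass-through X86AsmInstrumentation is returned.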
X86AsmInstrumentation *
llvm::CreateX86AsmInstrumentation(const MCTargetOptions &MCOptions,
                                  const MCContext &Ctx,
                                  const MCSubtargetInfo *&STI) {
  Triple T(STI->getTargetTriple());
  const bool hasCompilerRTSupport = T.isOSLinux();
  if (ClAsanInstrumentAssembly && hasCompilerRTSupport &&
      MCOptions.SanitizeAddress) {
    if (STI->getFeatureBits()[X86::Mode32Bit] != 0)
      return new X86AddressSanitizer32(STI);
    if (STI->getFeatureBits()[X86::Mode64Bit] != 0)
      return new X86AddressSanitizer64(STI);
  }
  return new X86AsmInstrumentation(STI);
}