//======- X86RetpolineThunks.cpp - Construct retpoline thunks for x86 --=====//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Pass that injects an MI thunk implementing a "retpoline". This is
/// a RET-implemented trampoline that is used to lower indirect calls in a way
/// that prevents speculation on some x86 processors and can be used to
/// mitigate security vulnerabilities due to targeted speculative execution and
/// side channels such as CVE-2017-5715.
///
/// TODO(chandlerc): All of this code could use better comments and
/// documentation.
///
//===----------------------------------------------------------------------===//
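
// For illustration only (this lowering is done elsewhere in the X86 backend,
// not by this pass): with retpolines enabled, an indirect call such as
//
//   callq *%r11
//
// is instead emitted as a direct call to the matching thunk created below,
//
//   callq __llvm_retpoline_r11
//
// so no indirect branch is ever executed for the call; the thunk "returns"
// to the address left in %r11.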

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86Subtarget.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "x86-retpoline-thunks"

static const char ThunkNamePrefix[] = "__llvm_retpoline_";
static const char R11ThunkName[] = "__llvm_retpoline_r11";
static const char EAXThunkName[] = "__llvm_retpoline_eax";
static const char ECXThunkName[] = "__llvm_retpoline_ecx";
static const char EDXThunkName[] = "__llvm_retpoline_edx";
static const char EDIThunkName[] = "__llvm_retpoline_edi";

namespace {
class X86RetpolineThunks : public MachineFunctionPass {
public:
  static char ID;

  X86RetpolineThunks() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return "X86 Retpoline Thunks"; }

  bool doInitialization(Module &M) override;
  bool runOnMachineFunction(MachineFunction &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    MachineFunctionPass::getAnalysisUsage(AU);
    AU.addRequired<MachineModuleInfo>();
    AU.addPreserved<MachineModuleInfo>();
  }

private:
  MachineModuleInfo *MMI;
  const TargetMachine *TM;
  bool Is64Bit;
  const X86Subtarget *STI;
  const X86InstrInfo *TII;

  bool InsertedThunks;

  void createThunkFunction(Module &M, StringRef Name);
  void insertRegReturnAddrClobber(MachineBasicBlock &MBB, unsigned Reg);
  void populateThunk(MachineFunction &MF, Optional<unsigned> Reg = None);
};

} // end anonymous namespace

FunctionPass *llvm::createX86RetpolineThunksPass() {
  return new X86RetpolineThunks();
}

char X86RetpolineThunks::ID = 0;

bool X86RetpolineThunks::doInitialization(Module &M) {
  InsertedThunks = false;
  return false;
}

bool X86RetpolineThunks::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << getPassName() << '\n');

  TM = &MF.getTarget();
  STI = &MF.getSubtarget<X86Subtarget>();
  TII = STI->getInstrInfo();
  Is64Bit = TM->getTargetTriple().getArch() == Triple::x86_64;

  MMI = &getAnalysis<MachineModuleInfo>();
  Module &M = const_cast<Module &>(*MMI->getModule());

  // If this function is not a thunk, check to see if we need to insert
  // a thunk.
  if (!MF.getName().startswith(ThunkNamePrefix)) {
    // If we've already inserted a thunk, nothing else to do.
    if (InsertedThunks)
      return false;

    // Only add a thunk if one of the functions has the retpoline feature
    // enabled in its subtarget, and doesn't enable external thunks.
    // FIXME: Conditionalize on indirect calls so we don't emit a thunk when
    // nothing will end up calling it.
    // FIXME: It's a little silly to look at every function just to enumerate
    // the subtargets, but eventually we'll want to look at them for indirect
    // calls, so maybe this is OK.
    if ((!STI->useRetpolineIndirectCalls() &&
         !STI->useRetpolineIndirectBranches()) ||
        STI->useRetpolineExternalThunk())
      return false;

    // Otherwise, we need to insert the thunk.
    // WARNING: This is not really a well behaving thing to do in a function
    // pass. We extract the module and insert a new function (and machine
    // function) directly into the module.
    if (Is64Bit)
      createThunkFunction(M, R11ThunkName);
    else
      for (StringRef Name :
           {EAXThunkName, ECXThunkName, EDXThunkName, EDIThunkName})
        createThunkFunction(M, Name);
    InsertedThunks = true;
    return true;
  }

  // If this *is* a thunk function, we need to populate it with the correct MI.
  if (Is64Bit) {
    assert(MF.getName() == "__llvm_retpoline_r11" &&
           "Should only have an r11 thunk on 64-bit targets");

    // __llvm_retpoline_r11:
    //   callq .Lr11_call_target
    // .Lr11_capture_spec:
    //   pause
    //   lfence
    //   jmp .Lr11_capture_spec
    // .align 16
    // .Lr11_call_target:
    //   movq %r11, (%rsp)
    //   retq
    populateThunk(MF, X86::R11);
  } else {
    // For 32-bit targets we need to emit a collection of thunks for various
    // possible scratch registers as well as a fallback that uses EDI, which is
    // normally callee saved.
    //
    // __llvm_retpoline_eax:
    //   calll .Leax_call_target
    // .Leax_capture_spec:
    //   pause
    //   lfence
    //   jmp .Leax_capture_spec
    // .align 16
    // .Leax_call_target:
    //   movl %eax, (%esp)  # Clobber return addr
    //   retl
    //
    // __llvm_retpoline_ecx:
    //   ...  # Same setup
    //   movl %ecx, (%esp)
    //   retl
    //
    // __llvm_retpoline_edx:
    //   ...  # Same setup
    //   movl %edx, (%esp)
    //   retl
    //
    // __llvm_retpoline_edi:
    //   ...  # Same setup
    //   movl %edi, (%esp)
    //   retl
    if (MF.getName() == EAXThunkName)
      populateThunk(MF, X86::EAX);
    else if (MF.getName() == ECXThunkName)
      populateThunk(MF, X86::ECX);
    else if (MF.getName() == EDXThunkName)
      populateThunk(MF, X86::EDX);
    else if (MF.getName() == EDIThunkName)
      populateThunk(MF, X86::EDI);
    else
      llvm_unreachable("Invalid thunk name on x86-32!");
  }

  return true;
}

void X86RetpolineThunks::createThunkFunction(Module &M, StringRef Name) {
  assert(Name.startswith(ThunkNamePrefix) &&
         "Created a thunk with an unexpected prefix!");

  LLVMContext &Ctx = M.getContext();
  auto Type = FunctionType::get(Type::getVoidTy(Ctx), false);
  Function *F =
      Function::Create(Type, GlobalValue::LinkOnceODRLinkage, Name, &M);
  F->setVisibility(GlobalValue::HiddenVisibility);
  F->setComdat(M.getOrInsertComdat(Name));

  // Add Attributes so that we don't create a frame, unwind information, or
  // inline.
  AttrBuilder B;
  B.addAttribute(llvm::Attribute::NoUnwind);
  B.addAttribute(llvm::Attribute::Naked);
  F->addAttributes(llvm::AttributeList::FunctionIndex, B);

  // Populate our function a bit so that we can verify.
  BasicBlock *Entry = BasicBlock::Create(Ctx, "entry", F);
  IRBuilder<> Builder(Entry);

  Builder.CreateRetVoid();
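
  // At this point the IR for the thunk is roughly (a sketch only; the exact
  // textual form, e.g. the attribute group number, is chosen by the printer):
  //
  //   define linkonce_odr hidden void @__llvm_retpoline_r11() #0 comdat {
  //   entry:
  //     ret void
  //   }
  //
  // where #0 carries the nounwind and naked attributes added above.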

  // MachineFunctions/MachineBasicBlocks aren't created automatically for the
  // IR-level constructs we already made. Create them and insert them into the
  // module.
  MachineFunction &MF = MMI->getOrCreateMachineFunction(*F);
  MachineBasicBlock *EntryMBB = MF.CreateMachineBasicBlock(Entry);

  // Insert EntryMBB into MF. It's not in the module until we do this.
  MF.insert(MF.end(), EntryMBB);
}

void X86RetpolineThunks::insertRegReturnAddrClobber(MachineBasicBlock &MBB,
                                                    unsigned Reg) {
  const unsigned MovOpc = Is64Bit ? X86::MOV64mr : X86::MOV32mr;
  const unsigned SPReg = Is64Bit ? X86::RSP : X86::ESP;
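  // Store Reg at the top of the stack: "movq %reg, (%rsp)" on 64-bit targets
  // or "movl %reg, (%esp)" on 32-bit ones. Inside the thunk this overwrites
  // the return address pushed by the thunk's internal call, so the final RET
  // transfers control to the original indirect call target held in Reg.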
  addRegOffset(BuildMI(&MBB, DebugLoc(), TII->get(MovOpc)), SPReg, false, 0)
      .addReg(Reg);
}

void X86RetpolineThunks::populateThunk(MachineFunction &MF,
                                       Optional<unsigned> Reg) {
  // Set MF properties. We never use vregs...
  MF.getProperties().set(MachineFunctionProperties::Property::NoVRegs);

  MachineBasicBlock *Entry = &MF.front();
  Entry->clear();

  MachineBasicBlock *CaptureSpec =
      MF.CreateMachineBasicBlock(Entry->getBasicBlock());
  MachineBasicBlock *CallTarget =
      MF.CreateMachineBasicBlock(Entry->getBasicBlock());
  MF.push_back(CaptureSpec);
  MF.push_back(CallTarget);

  const unsigned CallOpc = Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32;
  const unsigned RetOpc = Is64Bit ? X86::RETQ : X86::RETL;
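
  // The entry block ends in a direct call to CallTarget. That call pushes the
  // address of the fall-through block (CaptureSpec) as its return address, so
  // any speculation past the final RET is trapped in CaptureSpec's
  // pause/lfence loop, while the architectural path continues in CallTarget,
  // which overwrites that return address with the real call target before
  // returning.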
  BuildMI(Entry, DebugLoc(), TII->get(CallOpc)).addMBB(CallTarget);
  Entry->addSuccessor(CallTarget);
  Entry->addSuccessor(CaptureSpec);
  CallTarget->setHasAddressTaken();

  // In the capture loop for speculation, we want to stop the processor from
  // speculating as fast as possible. On Intel processors, the PAUSE
  // instruction will block speculation without consuming any execution
  // resources. On AMD processors, the PAUSE instruction is (essentially) a
  // nop, so we also use an LFENCE instruction which they have advised will
  // stop speculation as well with minimal resource utilization. We still end
  // the capture with a jump to form an infinite loop to fully guarantee that,
  // no matter what implementation of the x86 ISA is used, speculation down
  // this code path never escapes.
  BuildMI(CaptureSpec, DebugLoc(), TII->get(X86::PAUSE));
  BuildMI(CaptureSpec, DebugLoc(), TII->get(X86::LFENCE));
  BuildMI(CaptureSpec, DebugLoc(), TII->get(X86::JMP_1)).addMBB(CaptureSpec);
  CaptureSpec->setHasAddressTaken();
  CaptureSpec->addSuccessor(CaptureSpec);

  CallTarget->setAlignment(4);
  insertRegReturnAddrClobber(*CallTarget, *Reg);
  BuildMI(CallTarget, DebugLoc(), TII->get(RetOpc));
}