//===- MipsOptimizePICCall.cpp - Optimize PIC Calls -----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass eliminates unnecessary instructions that set up $gp and replaces
// instructions that load target function addresses with copy instructions.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/MipsBaseInfo.h"
#include "Mips.h"
#include "MipsRegisterInfo.h"
#include "MipsSubtarget.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/RecyclingAllocator.h"
#include <cassert>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "optimize-mips-pic-call"
static cl::opt<bool> LoadTargetFromGOT("mips-load-target-from-got",
                                       cl::init(true),
                                       cl::desc("Load target address from GOT"),
                                       cl::Hidden);

static cl::opt<bool> EraseGPOpnd("mips-erase-gp-opnd",
                                 cl::init(true), cl::desc("Erase GP Operand"),
                                 cl::Hidden);
namespace {

using ValueType = PointerUnion<const Value *, const PseudoSourceValue *>;

using CntRegP = std::pair<unsigned, unsigned>;

using AllocatorTy = RecyclingAllocator<BumpPtrAllocator,
                                       ScopedHashTableVal<ValueType, CntRegP>>;

using ScopedHTType = ScopedHashTable<ValueType, CntRegP,
                                     DenseMapInfo<ValueType>, AllocatorTy>;
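// Each CntRegP entry, keyed by the underlying object of a call target, pairs
// the number of dominating instructions that load that target's address with
// the virtual register defined by the most recent such load. The scoped hash
// table opens and closes scopes along the dominator tree, so lookups only see
// entries recorded in dominating blocks.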
class MBBInfo {
public:
  MBBInfo(MachineDomTreeNode *N);

  const MachineDomTreeNode *getNode() const;
  bool isVisited() const;
  void preVisit(ScopedHTType &ScopedHT);
  void postVisit();

private:
  MachineDomTreeNode *Node;
  ScopedHTType::ScopeTy *HTScope;
};
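// An MBBInfo wraps a dominator-tree node for the iterative walk in
// runOnMachineFunction; HTScope owns the hash-table scope opened when the
// block is first visited, and a non-null HTScope doubles as the "already
// visited" marker.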
class OptimizePICCall : public MachineFunctionPass {
public:
  OptimizePICCall() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return "Mips OptimizePICCall"; }

  bool runOnMachineFunction(MachineFunction &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<MachineDominatorTree>();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

private:
  /// Visit MBB.
  bool visitNode(MBBInfo &MBBI);

  /// Test if MI jumps to a function via a register.
  ///
  /// Also, return the virtual register containing the target function's
  /// address and the underlying object in Reg and Val respectively, if the
  /// function's address can be resolved lazily.
  bool isCallViaRegister(MachineInstr &MI, unsigned &Reg,
                         ValueType &Val) const;

  /// Return the number of instructions that dominate the current
  /// instruction and load the function address from object Entry.
  unsigned getCount(ValueType Entry);

  /// Return the destination virtual register of the last instruction
  /// that loads from object Entry.
  unsigned getReg(ValueType Entry);

  /// Update ScopedHT.
  void incCntAndSetReg(ValueType Entry, unsigned Reg);

  ScopedHTType ScopedHT;

  static char ID;
};
} // end of anonymous namespace

char OptimizePICCall::ID = 0;
/// Return the first MachineOperand of MI if it is a used virtual register.
static MachineOperand *getCallTargetRegOpnd(MachineInstr &MI) {
  if (MI.getNumOperands() == 0)
    return nullptr;

  MachineOperand &MO = MI.getOperand(0);

  if (!MO.isReg() || !MO.isUse() ||
      !TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    return nullptr;

  return &MO;
}
/// Return the type of register Reg.
static MVT::SimpleValueType getRegTy(unsigned Reg, MachineFunction &MF) {
  const TargetRegisterInfo &TRI = *MF.getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(Reg);
  assert(TRI.legalclasstypes_end(*RC) - TRI.legalclasstypes_begin(*RC) == 1);
  return *TRI.legalclasstypes_begin(*RC);
}
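// Background: in MIPS PIC code an indirect call must carry the callee's
// address in $t9 ($25); PIC callees recompute $gp from $t9 in their prologue,
// and lazy-binding stubs depend on it as well. The transformation below pins
// the call target to $t9 so the emitted jalr always uses it.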
/// Do the following transformation:
///
/// jalr $vreg
/// =>
/// copy $t9, $vreg
/// jalr $t9
static void setCallTargetReg(MachineBasicBlock *MBB,
                             MachineBasicBlock::iterator I) {
  MachineFunction &MF = *MBB->getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  unsigned SrcReg = I->getOperand(0).getReg();
  unsigned DstReg = getRegTy(SrcReg, MF) == MVT::i32 ? Mips::T9 : Mips::T9_64;
  BuildMI(*MBB, I, I->getDebugLoc(), TII.get(TargetOpcode::COPY), DstReg)
      .addReg(SrcReg);
  I->getOperand(0).setReg(DstReg);
}
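// The $gp operand on a call is needed only when the call may still go through
// a lazy-binding stub, which requires $gp to be set up; once a dominating call
// to the same target has already forced binding, eraseGPOpnd below can drop
// the operand (see visitNode).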
/// Search MI's operands for register GP and erase it.
static void eraseGPOpnd(MachineInstr &MI) {
  if (!EraseGPOpnd)
    return;

  MachineFunction &MF = *MI.getParent()->getParent();
  MVT::SimpleValueType Ty = getRegTy(MI.getOperand(0).getReg(), MF);
  unsigned Reg = Ty == MVT::i32 ? Mips::GP : Mips::GP_64;

  for (unsigned I = 0; I < MI.getNumOperands(); ++I) {
    MachineOperand &MO = MI.getOperand(I);
    if (MO.isReg() && MO.getReg() == Reg) {
      MI.RemoveOperand(I);
      return;
    }
  }

  llvm_unreachable(nullptr);
}
MBBInfo::MBBInfo(MachineDomTreeNode *N) : Node(N), HTScope(nullptr) {}

const MachineDomTreeNode *MBBInfo::getNode() const { return Node; }

bool MBBInfo::isVisited() const { return HTScope; }

void MBBInfo::preVisit(ScopedHTType &ScopedHT) {
  HTScope = new ScopedHTType::ScopeTy(ScopedHT);
}

void MBBInfo::postVisit() {
  delete HTScope;
}
// OptimizePICCall methods.
bool OptimizePICCall::runOnMachineFunction(MachineFunction &F) {
  if (static_cast<const MipsSubtarget &>(F.getSubtarget()).inMips16Mode())
    return false;

  // Do a pre-order traversal of the dominator tree.
  MachineDominatorTree *MDT = &getAnalysis<MachineDominatorTree>();
  bool Changed = false;

  SmallVector<MBBInfo, 8> WorkList(1, MBBInfo(MDT->getRootNode()));

  while (!WorkList.empty()) {
    MBBInfo &MBBI = WorkList.back();

    // If this MBB has already been visited, destroy the scope for the MBB and
    // pop it from the work list.
    if (MBBI.isVisited()) {
      MBBI.postVisit();
      WorkList.pop_back();
      continue;
    }

    // Visit the MBB and add its children to the work list.
    MBBI.preVisit(ScopedHT);
    Changed |= visitNode(MBBI);
    const MachineDomTreeNode *Node = MBBI.getNode();
    const std::vector<MachineDomTreeNode *> &Children = Node->getChildren();
    WorkList.append(Children.begin(), Children.end());
  }

  return Changed;
}
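// Process a single basic block. Register-indirect calls dominated by an
// earlier call to the same target can drop their $gp operand; from the third
// such call on, and only when -mips-load-target-from-got is disabled, the
// previously loaded address is reused instead of being reloaded from the GOT.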
bool OptimizePICCall::visitNode(MBBInfo &MBBI) {
  bool Changed = false;
  MachineBasicBlock *MBB = MBBI.getNode()->getBlock();

  for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); I != E;
       ++I) {
    unsigned Reg;
    ValueType Entry;

    // Skip instructions that are not call instructions via registers.
    if (!isCallViaRegister(*I, Reg, Entry))
      continue;

    Changed = true;
    unsigned N = getCount(Entry);

    if (N != 0) {
      // If a function has been called more than twice, we do not have to emit
      // a load instruction to get the function address from the GOT, but can
      // instead reuse the address that has been loaded before.
      if (N >= 2 && !LoadTargetFromGOT)
        getCallTargetRegOpnd(*I)->setReg(getReg(Entry));

      // Erase the $gp operand if this isn't the first time a function has
      // been called. $gp needs to be set up only if the function call can go
      // through a lazy binding stub.
      eraseGPOpnd(*I);
    }

    if (Entry)
      incCntAndSetReg(Entry, Reg);

    setCallTargetReg(MBB, I);
  }

  return Changed;
}
bool OptimizePICCall::isCallViaRegister(MachineInstr &MI, unsigned &Reg,
                                        ValueType &Val) const {
  if (!MI.isCall())
    return false;

  MachineOperand *MO = getCallTargetRegOpnd(MI);

  // Return if MI is not a function call via a register.
  if (!MO)
    return false;

  // Get the instruction that loads the function address from the GOT.
  Reg = MO->getReg();
  Val = nullptr;
  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  MachineInstr *DefMI = MRI.getVRegDef(Reg);

  assert(DefMI);

  // See if DefMI is an instruction that loads from a GOT entry that holds the
  // address of a lazy binding stub.
  if (!DefMI->mayLoad() || DefMI->getNumOperands() < 3)
    return true;

  unsigned Flags = DefMI->getOperand(2).getTargetFlags();

  if (Flags != MipsII::MO_GOT_CALL && Flags != MipsII::MO_CALL_LO16)
    return true;

  // Return the underlying object for the GOT entry in Val.
  assert(DefMI->hasOneMemOperand());
  Val = (*DefMI->memoperands_begin())->getValue();
  if (!Val)
    Val = (*DefMI->memoperands_begin())->getPseudoValue();
  return true;
}
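// MO_GOT_CALL and MO_CALL_LO16 (the latter used by the large-GOT code model)
// mark operands that address a GOT call entry, i.e. loads whose result is a
// callee address that may resolve to a lazy-binding stub; only such calls get
// a non-null Val and take part in the counting done by visitNode.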
unsigned OptimizePICCall::getCount(ValueType Entry) {
  return ScopedHT.lookup(Entry).first;
}
unsigned OptimizePICCall::getReg(ValueType Entry) {
  unsigned Reg = ScopedHT.lookup(Entry).second;
  assert(Reg);
  return Reg;
}
void OptimizePICCall::incCntAndSetReg(ValueType Entry, unsigned Reg) {
  CntRegP P = ScopedHT.lookup(Entry);
  ScopedHT.insert(Entry, std::make_pair(P.first + 1, Reg));
}
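// ScopedHT.insert records the updated pair in the innermost scope, so the new
// count and register are visible to blocks dominated by the current one and
// are discarded automatically when that scope is destroyed in postVisit.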
/// Return an OptimizePICCall object.
FunctionPass *llvm::createMipsOptimizePICCallPass() {
  return new OptimizePICCall();
}