//===-- TargetInstrInfoImpl.cpp - Target Instruction Information ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfoImpl class; it just provides default
// implementations of various methods.
//
//===----------------------------------------------------------------------===//
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PostRAHazardRecognizer.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfoImpl::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                             MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save the debug location before erasing the instruction it comes from.
  DebugLoc DL = Tail->getDebugLoc();

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If NewDest isn't the layout successor of MBB, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    InsertBranch(*MBB, NewDest, 0, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}
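// Illustrative sketch (editorial, not part of the original file): given
//   BB#1: ... ; Tail ; <dead instrs>
// a call ReplaceTailWithBranchTo(Tail, BB#7) leaves
//   BB#1: ... ; B <BB#7>
// with BB#7 as the sole successor. If BB#7 already follows BB#1 in layout,
// the branch is omitted and control simply falls through.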
// commuteInstruction - The default implementation of this method just
// exchanges the two operands returned by findCommutedOpIndices.
MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI,
                                                      bool NewMI) const {
  const TargetInstrDesc &TID = MI->getDesc();
  bool HasDef = TID.getNumDefs();
  if (HasDef && !MI->getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement
    // its own.
    return 0;
  unsigned Idx1, Idx2;
  if (!findCommutedOpIndices(MI, Idx1, Idx2)) {
    std::string msg;
    raw_string_ostream Msg(msg);
    Msg << "Don't know how to commute: " << *MI;
    report_fatal_error(Msg.str());
  }

  assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");
  unsigned Reg1 = MI->getOperand(Idx1).getReg();
  unsigned Reg2 = MI->getOperand(Idx2).getReg();
  bool Reg1IsKill = MI->getOperand(Idx1).isKill();
  bool Reg2IsKill = MI->getOperand(Idx2).isKill();
  bool ChangeReg0 = false;
  if (HasDef && MI->getOperand(0).getReg() == Reg1) {
    // Must be a two-address instruction!
    assert(MI->getDesc().getOperandConstraint(0, TOI::TIED_TO) &&
           "Expecting a two-address instruction!");
    Reg2IsKill = false;
    ChangeReg0 = true;
  }

  if (NewMI) {
    // Create a new instruction.
    unsigned Reg0 = HasDef
      ? (ChangeReg0 ? Reg2 : MI->getOperand(0).getReg()) : 0;
    bool Reg0IsDead = HasDef ? MI->getOperand(0).isDead() : false;
    MachineFunction &MF = *MI->getParent()->getParent();
    if (HasDef)
      return BuildMI(MF, MI->getDebugLoc(), MI->getDesc())
        .addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead))
        .addReg(Reg2, getKillRegState(Reg2IsKill))
        .addReg(Reg1, getKillRegState(Reg1IsKill));
    else
      return BuildMI(MF, MI->getDebugLoc(), MI->getDesc())
        .addReg(Reg2, getKillRegState(Reg2IsKill))
        .addReg(Reg1, getKillRegState(Reg1IsKill));
  }

  if (ChangeReg0)
    MI->getOperand(0).setReg(Reg2);
  MI->getOperand(Idx2).setReg(Reg1);
  MI->getOperand(Idx1).setReg(Reg2);
  MI->getOperand(Idx2).setIsKill(Reg1IsKill);
  MI->getOperand(Idx1).setIsKill(Reg2IsKill);
  return MI;
}
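// Illustrative sketch (editorial; assumes the v0 = op v1, v2 operand layout
// described below in findCommutedOpIndices): for a commutable instruction
//   %reg0 = ADD %reg1<kill>, %reg2
// the in-place path (NewMI == false) rewrites it to
//   %reg0 = ADD %reg2, %reg1<kill>
// swapping both the register numbers and the kill flags at Idx1/Idx2.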
/// findCommutedOpIndices - If specified MI is commutable, return the two
/// operand indices that would swap value. Return false if the instruction
/// is not in a form which this routine understands.
bool TargetInstrInfoImpl::findCommutedOpIndices(MachineInstr *MI,
                                                unsigned &SrcOpIdx1,
                                                unsigned &SrcOpIdx2) const {
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.isCommutable())
    return false;
  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  SrcOpIdx1 = TID.getNumDefs();
  SrcOpIdx2 = SrcOpIdx1 + 1;
  if (!MI->getOperand(SrcOpIdx1).isReg() ||
      !MI->getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}
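// For the common single-def "v0 = op v1, v2" shape this yields
// SrcOpIdx1 == 1 and SrcOpIdx2 == 2; a target with a different operand
// layout (extra defs, predicate operands before the sources, etc.) must
// override this hook.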
bool TargetInstrInfoImpl::PredicateInstruction(MachineInstr *MI,
                            const SmallVectorImpl<MachineOperand> &Pred) const {
  bool MadeChange = false;
  const TargetInstrDesc &TID = MI->getDesc();
  if (!TID.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
    if (TID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}
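// Note: Pred is expected to parallel the instruction's predicate operands,
// one entry per operand. As a hedged illustration (target-specific, not
// asserted by this file), an ARM-style predicate is a condition-code
// immediate plus a flags register, so a two-element Pred would fill two
// consecutive predicate operands.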
void TargetInstrInfoImpl::reMaterialize(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator I,
                                        unsigned DestReg,
                                        unsigned SubIdx,
                                        const MachineInstr *Orig,
                                        const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}
bool TargetInstrInfoImpl::produceSameValue(const MachineInstr *MI0,
                                           const MachineInstr *MI1) const {
  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}
MachineInstr *TargetInstrInfoImpl::duplicate(MachineInstr *Orig,
                                             MachineFunction &MF) const {
  assert(!Orig->getDesc().isNotDuplicable() &&
         "Instruction cannot be duplicated");
  return MF.CloneMachineInstr(Orig);
}
// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
                                              unsigned FoldIdx) {
  assert(MI->isCopy() && "MI must be a COPY instruction");
  if (MI->getNumOperands() != 2)
    return 0;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI->getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return 0;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : 0;

  const TargetRegisterClass *LiveRC = MRI.getRegClass(LiveReg);
  if (RC == LiveRC || RC->hasSubClass(LiveRC))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return 0;
}
bool TargetInstrInfoImpl::
canFoldMemoryOperand(const MachineInstr *MI,
                     const SmallVectorImpl<unsigned> &Ops) const {
  return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
}
/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The new instruction is
/// inserted before MI, and the client is responsible for removing the old
/// instruction.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   int FI) const {
  unsigned Flags = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI->getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI->getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // Ask the target to do the actual folding.
  if (MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI)) {
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->getDesc().mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->getDesc().mayLoad()) &&
           "Folded a use to a non-load!");
    const MachineFrameInfo &MFI = *MF.getFrameInfo();
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
      MF.getMachineMemOperand(
                      MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
                              Flags, MFI.getObjectSize(FI),
                              MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
    return MBB->insert(MI, NewMI);
  }

  // Straight COPY may fold as load/store.
  if (!MI->isCopy() || Ops.size() != 1)
    return 0;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return 0;

  const MachineOperand &MO = MI->getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return --Pos;
}
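// Illustrative sketch of the COPY fallback path (editorial, not part of the
// original file): folding operand 0 (the def) of
//   %vreg1<def> = COPY %vreg2<kill>
// into stack slot FI emits a storeRegToStackSlot of %vreg2, while folding
// operand 1 (the use) emits a loadRegFromStackSlot into %vreg1; in both
// cases the caller receives the newly inserted memory instruction via --Pos.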
/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const {
  assert(LoadMI->getDesc().canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
  if (!NewMI) return 0;

  NewMI = MBB.insert(MI, NewMI);

  // Copy the memoperands from the load to the folded instruction.
  NewMI->setMemRefs(LoadMI->memoperands_begin(),
                    LoadMI->memoperands_end());

  return NewMI;
}
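// A hedged usage note (editorial): this overload is typically exercised when
// LoadMI is itself rematerializable (canFoldAsLoad), e.g. a constant-pool
// load whose value the folded instruction can read directly; the exact set
// of foldable cases is decided by each target's foldMemoryOperandImpl.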
bool TargetInstrInfo::
isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetMachine &TM = MF.getTarget();
  const TargetInstrInfo &TII = *TM.getInstrInfo();
  const TargetRegisterInfo &TRI = *TM.getRegisterInfo();

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (TII.isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    return true;

  const TargetInstrDesc &TID = MI->getDesc();

  // Avoid instructions obviously unsafe for remat.
  if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable() ||
      TID.mayStore())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (TID.mayLoad() && !MI->isInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.def_empty(Reg))
          return false;
        BitVector AllocatableRegs = TRI.getAllocatableSet(MF, 0);
        if (AllocatableRegs.test(Reg))
          return false;
        // Check for a def among the register's aliases too.
        for (const unsigned *Alias = TRI.getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          if (!MRI.def_empty(AliasReg))
            return false;
          if (AllocatableRegs.test(AliasReg))
            return false;
        }
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def, and that in the first operand.
    if (MO.isDef() != (i == 0))
      return false;

    // For the def, it should be the only def of that register.
    if (MO.isDef() && (llvm::next(MRI.def_begin(Reg)) != MRI.def_end() ||
                       MRI.isLiveIn(Reg)))
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}
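// In summary (an editorial restatement of the checks above, not new policy):
// the generic test only accepts instructions of the shape
//   vreg = op <constant-ish inputs>
// such as a load from an immutable stack slot or a move-immediate, where the
// def is the sole def of its register and no virtual-register uses appear.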
/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfoImpl::isSchedulingBoundary(const MachineInstr *MI,
                                               const MachineBasicBlock *MBB,
                                               const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->definesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}
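// Hedged illustration (target-specific, not asserted by this file): on a
// target whose push/pop instructions write the stack pointer returned by
// getStackPointerRegisterToSaveRestore(), those instructions would be
// treated as scheduling boundaries by the check above.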
// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfoImpl::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II) const {
  return (ScheduleHazardRecognizer *)new PostRAHazardRecognizer(II);
}
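// Usage note (editorial): targets that describe their pipelines with an
// InstrItineraryData can simply rely on this default; a target would
// override CreateTargetPostRAHazardRecognizer only to supply a custom
// hazard recognizer in place of the itinerary-driven one built here.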