//===-- TargetInstrInfoImpl.cpp - Target Instruction Information ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfoImpl class; it just provides default
// implementations of various methods.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));

/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything after
/// it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfoImpl::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                             MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug location before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If NewDest isn't the layout successor of MBB, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    InsertBranch(*MBB, NewDest, 0, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}
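
// For illustration: if MBB ends in "... ; INST_A ; INST_B" and Tail points at
// INST_A, the result is "... ; <branch to NewDest>", with the branch omitted
// entirely when NewDest is MBB's layout successor.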

// commuteInstruction - The default implementation of this method just
// exchanges the two operands returned by findCommutedOpIndices.
MachineInstr *TargetInstrInfoImpl::commuteInstruction(MachineInstr *MI,
                                                      bool NewMI) const {
  const MCInstrDesc &MCID = MI->getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI->getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return 0;
  unsigned Idx1, Idx2;
  if (!findCommutedOpIndices(MI, Idx1, Idx2)) {
    std::string msg;
    raw_string_ostream Msg(msg);
    Msg << "Don't know how to commute: " << *MI;
    report_fatal_error(Msg.str());
  }

  assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");
  unsigned Reg1 = MI->getOperand(Idx1).getReg();
  unsigned Reg2 = MI->getOperand(Idx2).getReg();
  bool Reg1IsKill = MI->getOperand(Idx1).isKill();
  bool Reg2IsKill = MI->getOperand(Idx2).isKill();
  bool ChangeReg0 = false;
  if (HasDef && MI->getOperand(0).getReg() == Reg1) {
    // Must be two address instruction!
    assert(MI->getDesc().getOperandConstraint(0, MCOI::TIED_TO) &&
           "Expecting a two-address instruction!");
    Reg2IsKill = false;
    ChangeReg0 = true;
  }

  if (NewMI) {
    // Create a new instruction.
    unsigned Reg0 = HasDef
      ? (ChangeReg0 ? Reg2 : MI->getOperand(0).getReg()) : 0;
    bool Reg0IsDead = HasDef ? MI->getOperand(0).isDead() : false;
    MachineFunction &MF = *MI->getParent()->getParent();
    if (HasDef)
      return BuildMI(MF, MI->getDebugLoc(), MI->getDesc())
        .addReg(Reg0, RegState::Define | getDeadRegState(Reg0IsDead))
        .addReg(Reg2, getKillRegState(Reg2IsKill))
        .addReg(Reg1, getKillRegState(Reg1IsKill));
    else
      return BuildMI(MF, MI->getDebugLoc(), MI->getDesc())
        .addReg(Reg2, getKillRegState(Reg2IsKill))
        .addReg(Reg1, getKillRegState(Reg1IsKill));
  }

  if (ChangeReg0)
    MI->getOperand(0).setReg(Reg2);
  MI->getOperand(Idx2).setReg(Reg1);
  MI->getOperand(Idx1).setReg(Reg2);
  MI->getOperand(Idx2).setIsKill(Reg1IsKill);
  MI->getOperand(Idx1).setIsKill(Reg2IsKill);
  return MI;
}
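
// For illustration, the in-place path (NewMI == false) rewrites, e.g.,
//   %v0 = SUB %v1, %v2<kill>   ==>   %v0 = SUB %v2<kill>, %v1
// swapping the two source registers along with their kill flags.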

/// findCommutedOpIndices - If specified MI is commutable, return the two
/// operand indices that would swap value. Return false if the instruction
/// is not in a form which this routine understands.
bool TargetInstrInfoImpl::findCommutedOpIndices(MachineInstr *MI,
                                                unsigned &SrcOpIdx1,
                                                unsigned &SrcOpIdx2) const {
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isCommutable())
    return false;
  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  SrcOpIdx1 = MCID.getNumDefs();
  SrcOpIdx2 = SrcOpIdx1 + 1;
  if (!MI->getOperand(SrcOpIdx1).isReg() ||
      !MI->getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}
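
// For illustration: for a plain one-def instruction "v0 = op v1, v2", this
// default yields SrcOpIdx1 == 1 and SrcOpIdx2 == 2.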

bool TargetInstrInfoImpl::PredicateInstruction(MachineInstr *MI,
                            const SmallVectorImpl<MachineOperand> &Pred) const {
  bool MadeChange = false;
  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}
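
// For illustration, on a target with ARM-like predication,
//   TII->PredicateInstruction(MI, Pred);
// overwrites MI's predicate operands (condition code and predicate register)
// with the operands in Pred, returning true if any operand was rewritten.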

void TargetInstrInfoImpl::reMaterialize(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator I,
                                        unsigned DestReg,
                                        unsigned SubIdx,
                                        const MachineInstr *Orig,
                                        const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}
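
// For illustration: this clones Orig immediately before I and redirects the
// clone's def to DestReg (through subregister index SubIdx when nonzero);
// register allocation uses this to re-create cheap values next to their uses
// instead of reloading them from a spill slot.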

bool
TargetInstrInfoImpl::produceSameValue(const MachineInstr *MI0,
                                      const MachineInstr *MI1,
                                      const MachineRegisterInfo *MRI) const {
  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr *TargetInstrInfoImpl::duplicate(MachineInstr *Orig,
                                             MachineFunction &MF) const {
  assert(!Orig->getDesc().isNotDuplicable() &&
         "Instruction cannot be duplicated");
  return MF.CloneMachineInstr(Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
                                              unsigned FoldIdx) {
  assert(MI->isCopy() && "MI must be a COPY instruction");
  if (MI->getNumOperands() != 2)
    return 0;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI->getOperand(1-FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return 0;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : 0;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return 0;
}
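
// For illustration: given "%v0 = COPY %v1" with compatible register classes,
// folding operand 0 (the def) turns the copy into a store of %v1 to the stack
// slot, while folding operand 1 (the use) turns it into a load of %v0 from the
// slot; see foldMemoryOperand below.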

bool TargetInstrInfoImpl::
canFoldMemoryOperand(const MachineInstr *MI,
                     const SmallVectorImpl<unsigned> &Ops) const {
  return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
}

/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   int FI) const {
  unsigned Flags = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI->getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI->getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // Ask the target to do the actual folding.
  if (MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI)) {
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->getDesc().mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->getDesc().mayLoad()) &&
           "Folded a use to a non-load!");
    const MachineFrameInfo &MFI = *MF.getFrameInfo();
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
      MF.getMachineMemOperand(
          MachinePointerInfo(PseudoSourceValue::getFixedStack(FI)),
          Flags, MFI.getObjectSize(FI), MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
    return MBB->insert(MI, NewMI);
  }

  // Straight COPY may fold as load/store.
  if (!MI->isCopy() || Ops.size() != 1)
    return 0;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return 0;

  const MachineOperand &MO = MI->getOperand(1-Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);

  return --Pos;
}
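
// Spill code (e.g. InlineSpiller) uses this entry point to fold spills and
// reloads directly into instructions when the target's foldMemoryOperandImpl
// supports it; the COPY special case above still lets plain copies become
// stack loads/stores when the target declines.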

/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const {
  assert(LoadMI->getDesc().canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
  if (!NewMI) return 0;

  NewMI = MBB.insert(MI, NewMI);

  // Copy the memoperands from the load to the folded instruction.
  NewMI->setMemRefs(LoadMI->memoperands_begin(),
                    LoadMI->memoperands_end());

  return NewMI;
}
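
// For illustration: targets such as X86 use this form to fold a foldable load
// (e.g. a constant-pool access) into its user, turning a separate load plus
// ADDrr into a single ADDrm; copying the memoperands keeps the memory
// reference visible to later passes.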

bool TargetInstrInfo::
isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetMachine &TM = MF.getTarget();
  const TargetInstrInfo &TII = *TM.getInstrInfo();
  const TargetRegisterInfo &TRI = *TM.getRegisterInfo();

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (TII.isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    return true;

  const MCInstrDesc &MCID = MI->getDesc();

  // Avoid instructions obviously unsafe for remat.
  if (MCID.isNotDuplicable() || MCID.mayStore() ||
      MI->hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI->isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MCID.mayLoad() && !MI->isInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.def_empty(Reg))
          return false;
        BitVector AllocatableRegs = TRI.getAllocatableSet(MF, 0);
        if (AllocatableRegs.test(Reg))
          return false;
        // Check for a def among the register's aliases too.
        for (const unsigned *Alias = TRI.getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          if (!MRI.def_empty(AliasReg))
            return false;
          if (AllocatableRegs.test(AliasReg))
            return false;
        }
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def, and that in the first operand.
    if (MO.isDef() != (i == 0))
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}
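
// For illustration (pseudo-notation): an immediate materialization such as
// "%v0 = MOVi 42" or a load from an immutable stack slot passes these checks,
// while "%v0 = ADD %v1, %v2" is rejected because of its virtual-register uses.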

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfoImpl::isSchedulingBoundary(const MachineInstr *MI,
                                               const MachineBasicBlock *MBB,
                                               const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->definesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}
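
// For illustration: an instruction that adjusts the stack pointer (e.g. a
// call-frame setup) defines the register returned by
// getStackPointerRegisterToSaveRestore and is therefore treated as a boundary.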

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfoImpl::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfoImpl::
CreateTargetHazardRecognizer(const TargetMachine *TM,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfoImpl::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}