//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cctype>
#include <cstdlib>
#include <cstring>

using namespace llvm;
static cl::opt<bool> DisableHazardRecognizer(
    "disable-sched-hazard", cl::Hidden, cl::init(false),
    cl::desc("Disable hazard detection during preRA scheduling"));
TargetInstrInfo::~TargetInstrInfo() {
}
const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}
/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}
static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
  return strncmp(Str, MAI.getCommentString().data(),
                 MAI.getCommentString().size()) == 0;
}
/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overloaded in the target code to do that.
/// We implement a special case of the .space directive which takes only a
/// single integer argument in base 10 that is the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e. not a logical or arithmetic expression--size values without
/// the optional fill value. This is primarily used for creating arbitrary
/// sized inline asm blocks for testing purposes.
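/// For example, on a hypothetical target whose maximum instruction length is
/// 4 bytes, the string "add r0, r1\n.space 100" is counted as 4 + 100 = 104
/// bytes.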
unsigned TargetInstrInfo::getInlineAsmLength(
    const char *Str,
    const MCAsmInfo &MAI, const TargetSubtargetInfo *STI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;
  unsigned Length = 0;
  const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    if (AtInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MaxInstLength;
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize;
        SpaceSize = strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        while (*EStr != '\n' &&
               std::isspace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}
/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Update call site info and remove all the dead instructions
  // from the end of MBB.
  while (Tail != MBB->end()) {
    auto MI = Tail++;
    if (MI->isCall())
      MBB->getParent()->eraseCallSiteInfo(&*MI);
    MBB->erase(MI);
  }

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}
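// Default implementation of operand commutation: swaps the register operands
// at Idx1 and Idx2 together with their sub-register indices and
// kill/undef/internal-read/renamable flags, updating a tied destination
// operand when necessary. Returns the commuted instruction (a fresh clone when
// NewMI is true, otherwise MI itself), or nullptr if the operands cannot be
// handled here.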
MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
  Register Reg1 = MI.getOperand(Idx1).getReg();
  Register Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // Avoid calling isRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  bool Reg1IsRenamable = Register::isPhysicalRegister(Reg1)
                             ? MI.getOperand(Idx1).isRenamable()
                             : false;
  bool Reg2IsRenamable = Register::isPhysicalRegister(Reg2)
                             ? MI.getOperand(Idx2).isRenamable()
                             : false;
  // If the destination is tied to either of the commuted source registers,
  // then it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  // Avoid calling setIsRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  if (Register::isPhysicalRegister(Reg1))
    CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
  if (Register::isPhysicalRegister(Reg2))
    CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
  return CommutedMI;
}
MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in the findCommutedOpIndices()
  // method called below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}
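// Resolve the requested operand indices against the instruction's commutable
// operand pair: CommuteAnyOperandIndex placeholders are filled in from
// CommutableOpIdx1/CommutableOpIdx2, and explicitly requested indices are
// checked for consistency with that pair.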
bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}
bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}
bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}
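// Rewrite the predicate operands of MI in place using the operands in Pred,
// returning true if any operand was changed.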
bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}
bool TargetInstrInfo::hasLoadFromStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad() &&
        dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}
bool TargetInstrInfo::hasStoreToStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore() &&
        dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}
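// Compute the byte size and offset of a sub-register piece within its
// register class's spill slot. Illustrative example (hypothetical register
// class): with a 16-byte spill slot and a sub-register index covering bits
// [64, 128), this yields Size = 8 and Offset = 8 (Offset = 0 on a big-endian
// target).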
bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!SubIdx) {
    Size = TRI->getSpillSize(*RC);
    Offset = 0;
    return true;
  }
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size.
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = TRI->getSpillSize(*RC) - (Offset + Size);
  }
  return true;
}
void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}
bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}
MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
    MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
  MachineFunction &MF = *MBB.getParent();
  return MF.CloneMachineInstrBundle(MBB, InsertBefore, Orig);
}
// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              unsigned FoldIdx) {
  assert(MI.isCopy() && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  Register FoldReg = FoldOp.getReg();
  Register LiveReg = LiveOp.getReg();

  assert(Register::isVirtualRegister(FoldReg) && "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (Register::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}
void TargetInstrInfo::getNoop(MCInst &NopInst) const {
  llvm_unreachable("Not implemented");
}
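// Fold the operands listed in Ops of a STACKMAP, PATCHPOINT, or STATEPOINT
// into frame-index references on a newly built instruction. Returns nullptr
// if any requested operand is not part of the instruction's variable
// (live-value) operands.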
static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP: {
    // StackMapLiveValues are foldable
    StartIdx = StackMapOpers(&MI).getVarIdx();
    break;
  }
  case TargetOpcode::PATCHPOINT: {
    // For PatchPoint, the call args are not foldable (even if reported in the
    // stackmap e.g. via anyregcc).
    StartIdx = PatchPointOpers(&MI).getVarIdx();
    break;
  }
  case TargetOpcode::STATEPOINT: {
    // For statepoints, fold deopt and gc arguments, but not call arguments.
    StartIdx = StatepointOpers(&MI).getVarIdx();
    break;
  }
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }

  // Return nullptr if any operands requested for folding are not foldable (not
  // part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < StartIdx)
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the meta data, and function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    MIB.add(MI.getOperand(i));

  for (unsigned i = StartIdx; i < MI.getNumOperands(); ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (is_contained(Ops, i)) {
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
          MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else {
      MIB.add(MO);
    }
  }
  return NewMI;
}
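// Try to rewrite MI so that the operands in Ops refer to the stack slot FI:
// stackmap-like opcodes go through foldPatchpoint, everything else is
// delegated to the target's foldMemoryOperandImpl, and as a last resort a
// plain COPY is lowered directly to a spill or reload.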
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS,
                                                 VirtRegMap *VRM) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
  }

  if (NewMI) {
    NewMI->setMemRefs(MF, MI.memoperands());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), Flags, MemSize,
        MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!MI.isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return &*--Pos;
}
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(MF, LoadMI.memoperands());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MF, MI.memoperands());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}
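// Return true if both source operands of Inst are defined by (unique) virtual
// register definitions that live in MBB, i.e. the values are part of the
// trace being considered for reassociation.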
bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && Register::isVirtualRegister(Op1.getReg()))
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && Register::isVirtualRegister(Op2.getReg()))
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And they need to be in the trace (otherwise, they won't have a depth).
  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
}
bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned AssocOpcode = Inst.getOpcode();

  // If only one operand has the same opcode and it's the second source
  // operand, the operands must be commuted.
  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 3. The previous instruction's result must only be used by Inst.
  return MI1->getOpcode() == AssocOpcode &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}
// 1. The operation must be associative and commutative.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return isAssociativeAndCommutative(Inst) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}
// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.
//
// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}
/// Return true when a code sequence can improve loop throughput.
bool
TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
  return false;
}
/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev,
    MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // This array encodes the operand index for each parameter because the
  // operands may be commuted. Each row corresponds to a pattern value,
  // and each column specifies the index of A, B, X, Y.
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  int Row;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
  default: llvm_unreachable("unexpected MachineCombinerPattern");
  }

  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
  MachineOperand &OpC = Root.getOperand(0);

  Register RegA = OpA.getReg();
  Register RegB = OpB.getReg();
  Register RegX = OpX.getReg();
  Register RegY = OpY.getReg();
  Register RegC = OpC.getReg();

  if (Register::isVirtualRegister(RegA))
    MRI.constrainRegClass(RegA, RC);
  if (Register::isVirtualRegister(RegB))
    MRI.constrainRegClass(RegB, RC);
  if (Register::isVirtualRegister(RegX))
    MRI.constrainRegClass(RegX, RC);
  if (Register::isVirtualRegister(RegY))
    MRI.constrainRegClass(RegY, RC);
  if (Register::isVirtualRegister(RegC))
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  Register NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  unsigned Opcode = Root.getOpcode();
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY));
  MachineInstrBuilder MIB2 =
      BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(true));

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);
}
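// Materialize one reassociation pattern rooted at Root: look up the Prev
// instruction named by the pattern and let reassociateOps build the
// replacement instructions.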
void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();

  // Select the previous instruction in the sequence based on the input pattern.
  MachineInstr *Prev = nullptr;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_XA_BY:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_YB:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
    break;
  default:
    break;
  }

  assert(Prev && "Unknown pattern for machine combiner");

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}
bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
    const MachineInstr &MI, AAResults *AA) const {
  const MachineFunction &MF = *MI.getMF();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  Register DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (Register::isVirtualRegister(DefReg) && MI.getOperand(0).getSubReg() &&
      MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
      MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg()) continue;
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (Register::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}
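// Return the stack-pointer adjustment performed by a call-frame setup or
// destroy pseudo-instruction, taking the direction of stack growth into
// account; non-frame instructions yield 0.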
int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getMF();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
      TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (!isFrameInstr(MI))
    return 0;

  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));

  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}
/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}
// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}
// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}
// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                               const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
      new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
}
// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
      new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}
//===----------------------------------------------------------------------===//
// SelectionDAG latency interface.
//===----------------------------------------------------------------------===//
int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}
int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}
//===----------------------------------------------------------------------===//
// MachineInstr latency interface.
//===----------------------------------------------------------------------===//
unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                         const MachineInstr &MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI.getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}
/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr &DefMI) const {
  if (DefMI.isTransient())
    return 0;
  if (DefMI.mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI.getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}
unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
  return 1;
}
unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI,
                                          unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI.mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}
bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx) const {
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI.getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}
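// Describe the value placed in a register by MI for debug-info purposes: for
// recognizable register copies the source operand is reported, and for
// move-immediate instructions the immediate operand, each paired with an
// empty DIExpression.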
Optional<ParamLoadedValue>
TargetInstrInfo::describeLoadedValue(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getMF();
  const MachineOperand *Op = nullptr;
  DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
  const MachineOperand *SrcRegOp, *DestRegOp;

  if (isCopyInstr(MI, SrcRegOp, DestRegOp)) {
    Op = SrcRegOp;
    return ParamLoadedValue(*Op, Expr);
  } else if (MI.isMoveImmediate()) {
    Op = &MI.getOperand(1);
    return ParamLoadedValue(*Op, Expr);
  }

  return None;
}
/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx,
                                       const MachineInstr &UseMI,
                                       unsigned UseIdx) const {
  unsigned DefClass = DefMI.getDesc().getSchedClass();
  unsigned UseClass = UseMI.getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}
/// If we can determine the operand latency from the def only, without
/// itinerary lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
    const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required
  return -1;
}
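// Collect the (Reg, SubReg, SubIdx) inputs of a REG_SEQUENCE or
// REG_SEQUENCE-like instruction, skipping undef operands.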
bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() ||
          MI.isRegSequenceLike()) && "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    if (MOReg.isUndef())
      continue;
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}
bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() ||
          MI.isExtractSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  // Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  if (MOReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}
bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() ||
          MI.isInsertSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  // Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  if (MOInsertedReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");

  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}
TargetInstrInfo::PipelinerLoopInfo::~PipelinerLoopInfo() {}