//===-- TargetInstrInfo.cpp - Target Instruction Information -------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cctype>

using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
    "disable-sched-hazard", cl::Hidden, cl::init(false),
    cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() {
}

const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
  return strncmp(Str, MAI.getCommentString().data(),
                 MAI.getCommentString().size()) == 0;
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
/// We implement a special case of the .space directive which takes only a
/// single integer argument in base 10 that is the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e. not a logical or arithmetic expression--size values without
/// the optional fill value. This is primarily used for creating arbitrary
/// sized inline asm blocks for testing purposes.
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
                                             const MCAsmInfo &MAI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;
  unsigned Length = 0;
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    if (AtInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MAI.getMaxInstLength();
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize;
        SpaceSize = strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        while (*EStr != '\n' && std::isspace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}

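// Worked example (hypothetical, assuming "#" is the target's comment string
// and a MaxInstLength of 4): the string
//   "addl %eax, %ebx\n.space 512\n# trailing comment"
// measures 4 + 512 = 516 bytes: one ordinary instruction counted at the
// conservative per-instruction maximum, a 512-byte .space block, and a
// comment that contributes nothing.
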
/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its
    // own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
  unsigned Reg1 = MI.getOperand(Idx1).getReg();
  unsigned Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // Avoid calling isRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  bool Reg1IsRenamable = TargetRegisterInfo::isPhysicalRegister(Reg1)
                             ? MI.getOperand(Idx1).isRenamable()
                             : false;
  bool Reg2IsRenamable = TargetRegisterInfo::isPhysicalRegister(Reg2)
                             ? MI.getOperand(Idx2).isRenamable()
                             : false;
  // If destination is tied to either of the commuted source registers, then
  // it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  // Avoid calling setIsRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  if (TargetRegisterInfo::isPhysicalRegister(Reg1))
    CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
  if (TargetRegisterInfo::isPhysicalRegister(Reg2))
    CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
  return CommutedMI;
}

MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in findCommutedOpIndices() method
  // called below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}

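// For example (illustrative): with commutable operands at indices 1 and 2,
// a caller passing ResultIdx1 == CommuteAnyOperandIndex and ResultIdx2 == 2
// has ResultIdx1 fixed to 1 by the second branch above, while a fully
// specified pair such as (2, 1) is simply validated by the final clause.
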
bool TargetInstrInfo::findCommutedOpIndices(MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfo::hasLoadFromStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad() &&
        dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::hasStoreToStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore() &&
        dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!SubIdx) {
    Size = TRI->getSpillSize(*RC);
    Offset = 0;
    return true;
  }
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size.
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = TRI->getSpillSize(*RC) - (Offset + Size);
  }
  return true;
}

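// Illustrative numbers (assuming a 16-byte spill slot and a 64-bit
// sub-register at bit offset 64): Size = 8 and Offset = 8 on a little-endian
// target, while a big-endian target mirrors the offset to 16 - (8 + 8) = 0.
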
void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
    MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
  MachineFunction &MF = *MBB.getParent();
  return MF.CloneMachineInstrBundle(MBB, InsertBefore, Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              unsigned FoldIdx) {
  assert(MI.isCopy() && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

void TargetInstrInfo::getNoop(MCInst &NopInst) const {
  llvm_unreachable("Not implemented");
}

static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP: {
    // StackMapLiveValues are foldable.
    StartIdx = StackMapOpers(&MI).getVarIdx();
    break;
  }
  case TargetOpcode::PATCHPOINT: {
    // For PatchPoint, the call args are not foldable (even if reported in the
    // stackmap e.g. via anyregcc).
    StartIdx = PatchPointOpers(&MI).getVarIdx();
    break;
  }
  case TargetOpcode::STATEPOINT: {
    // For statepoints, fold deopt and gc arguments, but not call arguments.
    StartIdx = StatepointOpers(&MI).getVarIdx();
    break;
  }
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }

  // Return nullptr if any operands requested for folding are not foldable (not
  // part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < StartIdx)
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the meta data, and function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    MIB.add(MI.getOperand(i));

  for (unsigned i = StartIdx; i < MI.getNumOperands(); ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (is_contained(Ops, i)) {
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
          MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else
      MIB.add(MO);
  }
  return NewMI;
}

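// The net effect is that each folded register operand of the stackmap is
// rewritten as the four-operand indirect-memory tuple
//   <IndirectMemRefOp, SpillSize, FrameIndex, SpillOffset>
// which the stackmap lowering consumes in place of the original register.
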
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS);
  }

  if (NewMI) {
    NewMI->setMemRefs(MF, MI.memoperands());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), Flags, MemSize,
        MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!MI.isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return &*--Pos;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(MF, LoadMI.memoperands());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MF, MI.memoperands());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && TargetRegisterInfo::isVirtualRegister(Op1.getReg()))
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && TargetRegisterInfo::isVirtualRegister(Op2.getReg()))
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And they need to be in the trace (otherwise, they won't have a depth).
  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
}

bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned AssocOpcode = Inst.getOpcode();

  // If only one operand has the same opcode and it's the second source
  // operand, the operands must be commuted.
  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 3. The previous instruction's result must only be used by Inst.
  return MI1->getOpcode() == AssocOpcode &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return isAssociativeAndCommutative(Inst) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.
//
// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}

/// Return true when a code sequence can improve loop throughput.
bool
TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
  return false;
}

/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev,
    MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // This array encodes the operand index for each parameter because the
  // operands may be commuted. Each row corresponds to a pattern value,
  // and each column specifies the index of A, B, X, Y.
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  int Row;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
  default: llvm_unreachable("unexpected MachineCombinerPattern");
  }

  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
  MachineOperand &OpC = Root.getOperand(0);

  unsigned RegA = OpA.getReg();
  unsigned RegB = OpB.getReg();
  unsigned RegX = OpX.getReg();
  unsigned RegY = OpY.getReg();
  unsigned RegC = OpC.getReg();

  if (TargetRegisterInfo::isVirtualRegister(RegA))
    MRI.constrainRegClass(RegA, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegB))
    MRI.constrainRegClass(RegB, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegX))
    MRI.constrainRegClass(RegX, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegY))
    MRI.constrainRegClass(RegY, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegC))
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  unsigned NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  unsigned Opcode = Root.getOpcode();
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY));
  MachineInstrBuilder MIB2 =
      BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(true));

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);
}

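// Illustrative MIR for REASSOC_AX_BY (hypothetical virtual registers):
//   %5 = ADD %1, %2      ; Prev: B = A op X
//   %6 = ADD %5, %3      ; Root: C = B op Y
// becomes
//   %7 = ADD %2, %3      ; MIB1: NewVR = X op Y
//   %6 = ADD %1, %7      ; MIB2: C = A op NewVR
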
void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();

  // Select the previous instruction in the sequence based on the input
  // pattern.
  MachineInstr *Prev = nullptr;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_XA_BY:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_YB:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
    break;
  default:
    break;
  }

  assert(Prev && "Unknown pattern for machine combiner");

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}

bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
    const MachineInstr &MI, AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI.getMF();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  unsigned DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
      MI.getOperand(0).getSubReg() && MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getMF();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
      TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (!isFrameInstr(MI))
    return 0;

  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));

  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}

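// For example, on a target whose stack grows down, a call-frame setup of 16
// bytes (after alignment) yields +16 and the matching call-frame destroy
// yields -16; the signs are flipped on an upward-growing stack.
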
/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                               const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
      new ScoreboardHazardRecognizer(II, DAG, "misched");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
      new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

//===----------------------------------------------------------------------===//
// SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
// MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                         const MachineInstr &MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI.getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr &DefMI) const {
  if (DefMI.isTransient())
    return 0;
  if (DefMI.mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI.getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
  return 0;
}

unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI,
                                          unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI.mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx) const {
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI.getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx,
                                       const MachineInstr &UseMI,
                                       unsigned UseIdx) const {
  unsigned DefClass = DefMI.getDesc().getSchedClass();
  unsigned UseClass = UseMI.getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

/// If we can determine the operand latency from the def only, without
/// itinerary lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
    const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required
  return -1;
}

bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() || MI.isRegSequenceLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    if (MOReg.isUndef())
      continue;
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}

bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() || MI.isExtractSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  // Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  if (MOReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() || MI.isInsertSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  // Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  if (MOInsertedReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}