//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <cctype>
#include <cstdlib> // strtol
#include <cstring> // strncmp, strlen

using namespace llvm;
static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() {
}
const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}
/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}
static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
  return strncmp(Str, MAI.getCommentString().data(),
                 MAI.getCommentString().size()) == 0;
}
/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
/// We implement a special case of the .space directive which takes only a
/// single integer argument in base 10 that is the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e. not a logical or arithmetic expression--size values without
/// the optional fill value. This is primarily used for creating arbitrary
/// sized inline asm blocks for testing purposes.
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
                                             const MCAsmInfo &MAI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;
  unsigned Length = 0;
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    if (AtInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MAI.getMaxInstLength();
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize;
        SpaceSize = strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        while (*EStr != '\n' && std::isspace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}
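
// Illustrative example (not from the LLVM sources): for a target whose
// MAI.getMaxInstLength() is 4 and whose comment string is ";", the string
// "add r0, r1\n.space 16\n; note" yields 4 bytes for the first instruction,
// 16 bytes for the .space directive, and 0 bytes for the comment line, so
// getInlineAsmLength returns 20.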
/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If NewDest isn't the block immediately after MBB in layout, insert a
  // branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}
MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::commuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  unsigned Reg0 = HasDef ? MI.getOperand(0).getReg() : 0;
  unsigned Reg1 = MI.getOperand(Idx1).getReg();
  unsigned Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // Avoid calling isRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  bool Reg1IsRenamable = TargetRegisterInfo::isPhysicalRegister(Reg1)
                             ? MI.getOperand(Idx1).isRenamable()
                             : false;
  bool Reg2IsRenamable = TargetRegisterInfo::isPhysicalRegister(Reg2)
                             ? MI.getOperand(Idx2).isRenamable()
                             : false;
  // If the destination is tied to either of the commuted source registers,
  // it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
  }

  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  // Avoid calling setIsRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  if (TargetRegisterInfo::isPhysicalRegister(Reg1))
    CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
  if (TargetRegisterInfo::isPhysicalRegister(Reg2))
    CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
  return CommutedMI;
}
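
// Illustrative example (hypothetical two-address opcode): commuting
// "%a = MUL %a(tied), %b" with Idx1 = 1 and Idx2 = 2 swaps the two source
// operands and, because operand 1 is tied to the def, retargets the def as
// well, producing "%b = MUL %b(tied), %a"; kill/undef/renamable flags travel
// with their registers.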
MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in the findCommutedOpIndices()
  // method called below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}
bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}
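
// Illustrative example: with commutable operand indices (1, 2), a caller
// passing ResultIdx1 = CommuteAnyOperandIndex and ResultIdx2 = 2 gets
// ResultIdx1 fixed to 1, while passing two "any" indices yields (1, 2)
// directly; fixed indices that match neither commutable slot return false.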
bool TargetInstrInfo::findCommutedOpIndices(MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}
bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}
bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }

      ++j;
    }
  }
  return MadeChange;
}
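
// Illustrative example (hypothetical, ARM-style two-operand predicates):
// with Pred = {imm:EQ, reg:CPSR}, the loop above copies EQ into the
// condition-code predicate slot and CPSR into the predicate-register slot,
// turning an unconditional move into one executed only when EQ holds.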
bool TargetInstrInfo::hasLoadFromStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad() &&
        dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::hasStoreToStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore() &&
        dyn_cast_or_null<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}
bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!SubIdx) {
    Size = TRI->getSpillSize(*RC);
    Offset = 0;
    return true;
  }
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size.
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = TRI->getSpillSize(*RC) - (Offset + Size);
  }
  return true;
}
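
// Worked example (hypothetical register class): for a 16-byte class with a
// sub-register where getSubRegIdxSize() == 32 and getSubRegIdxOffset() == 32,
// Size = 4 and Offset = 4; on a big-endian target the offset is mirrored to
// 16 - (4 + 4) = 8.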
void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
    MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
  MachineFunction &MF = *MBB.getParent();
  return MF.CloneMachineInstrBundle(MBB, InsertBefore, Orig);
}
// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              unsigned FoldIdx) {
  assert(MI.isCopy() && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

void TargetInstrInfo::getNoop(MCInst &NopInst) const {
  llvm_unreachable("Not implemented");
}
static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP: {
    // StackMapLiveValues are foldable
    StartIdx = StackMapOpers(&MI).getVarIdx();
    break;
  }
  case TargetOpcode::PATCHPOINT: {
    // For PatchPoint, the call args are not foldable (even if reported in the
    // stackmap e.g. via anyregcc).
    StartIdx = PatchPointOpers(&MI).getVarIdx();
    break;
  }
  case TargetOpcode::STATEPOINT: {
    // For statepoints, fold deopt and gc arguments, but not call arguments.
    StartIdx = StatepointOpers(&MI).getVarIdx();
    break;
  }
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }

  // Return nullptr if any operands requested for folding are not foldable
  // (not part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < StartIdx)
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the metadata, or the function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    MIB.add(MI.getOperand(i));

  for (unsigned i = StartIdx; i < MI.getNumOperands(); ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (is_contained(Ops, i)) {
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
        MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else {
      MIB.add(MO);
    }
  }
  return NewMI;
}
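
// Illustrative example: for "STACKMAP 1, 0, %v0, %v1" (so StartIdx == 2),
// folding operand index 3 into frame index FI rewrites the live value as an
// indirect memory reference: "STACKMAP 1, 0, %v0, IndirectMemRefOp, <size>,
// FI, <offset>".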
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS);
  }

  if (NewMI) {
    NewMI->setMemRefs(MF, MI.memoperands());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO = MF.getMachineMemOperand(
        MachinePointerInfo::getFixedStack(MF, FI), Flags, MemSize,
        MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!MI.isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return &*--Pos;
}
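
// Illustrative usage (spiller-style sketch; names are hypothetical): to fold
// the def of "%v = COPY %w" into stack slot SpillFI, a caller can do
//   if (MachineInstr *Folded = TII->foldMemoryOperand(CopyMI, {0}, SpillFI))
//     CopyMI.eraseFromParent(); // the COPY became a store of %w to SpillFI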
MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(MF, LoadMI.memoperands());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MF, MI.memoperands());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}
bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && TargetRegisterInfo::isVirtualRegister(Op1.getReg()))
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && TargetRegisterInfo::isVirtualRegister(Op2.getReg()))
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And they need to be in the trace (otherwise, they won't have a depth).
  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
}

bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned AssocOpcode = Inst.getOpcode();

  // If only one operand has the same opcode and it's the second source
  // operand, the operands must be commuted.
  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 3. The previous instruction's result must only be used by Inst.
  return MI1->getOpcode() == AssocOpcode &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return isAssociativeAndCommutative(Inst) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}
// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.
//
// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root,
    SmallVectorImpl<MachineCombinerPattern> &Patterns) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}
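
// Illustrative example: given
//   %t0 = FADD %a, %b
//   %t1 = FADD %t0, %c   (Prev)
//   %t2 = FADD %t1, %d   (Root)
// the REASSOC_AX_BY pattern rewrites the tail as
//   %t3 = FADD %c, %d
//   %t2 = FADD %t0, %t3
// shortening the dependent FADD chain from three operations to two.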
/// Return true when a code sequence can improve loop throughput.
bool
TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
  return false;
}
/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev,
    MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // This array encodes the operand index for each parameter because the
  // operands may be commuted. Each row corresponds to a pattern value,
  // and each column specifies the index of A, B, X, Y.
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  int Row;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
  default: llvm_unreachable("unexpected MachineCombinerPattern");
  }

  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
  MachineOperand &OpC = Root.getOperand(0);

  unsigned RegA = OpA.getReg();
  unsigned RegB = OpB.getReg();
  unsigned RegX = OpX.getReg();
  unsigned RegY = OpY.getReg();
  unsigned RegC = OpC.getReg();

  if (TargetRegisterInfo::isVirtualRegister(RegA))
    MRI.constrainRegClass(RegA, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegB))
    MRI.constrainRegClass(RegB, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegX))
    MRI.constrainRegClass(RegX, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegY))
    MRI.constrainRegClass(RegY, RC);
  if (TargetRegisterInfo::isVirtualRegister(RegC))
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  unsigned NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  unsigned Opcode = Root.getOpcode();
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY));
  MachineInstrBuilder MIB2 =
      BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(true));

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);
}
void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();

  // Select the previous instruction in the sequence based on the input
  // pattern.
  MachineInstr *Prev = nullptr;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_XA_BY:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_YB:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
    break;
  default:
    break;
  }

  assert(Prev && "Unknown pattern for machine combiner");

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}
bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
    const MachineInstr &MI, AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI.getMF();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  unsigned DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
      MI.getOperand(0).getSubReg() && MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}
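
// Illustrative examples (opcode names are hypothetical): a constant
// materialization such as "%v = MOVi 42" passes every check above, while
// "%v = LDRi %base, 0" fails on the virtual-register use of %base, and a
// load from mutable memory fails isDereferenceableInvariantLoad.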
int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getMF();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
      TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (!isFrameInstr(MI))
    return 0;

  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));

  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}
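
// Illustrative example: on a target whose stack grows down, a call-frame
// setup pseudo reserving 16 bytes returns +16 and the matching call-frame
// destroy pseudo returns -16; the signs flip on an upward-growing stack.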
/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}
// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                               const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
      new ScoreboardHazardRecognizer(II, DAG, "misched");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
      new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}
//===----------------------------------------------------------------------===//
// SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}
//===----------------------------------------------------------------------===//
// MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                         const MachineInstr &MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI.getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}
/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr &DefMI) const {
  if (DefMI.isTransient())
    return 0;
  if (DefMI.mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI.getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
  return 0;
}
unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI,
                                          unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI.mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx) const {
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI.getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}
/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx,
                                       const MachineInstr &UseMI,
                                       unsigned UseIdx) const {
  unsigned DefClass = DefMI.getDesc().getSchedClass();
  unsigned UseClass = UseMI.getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

/// If we can determine the operand latency from the def only, without
/// itinerary lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
    const InstrItineraryData *ItinData, const MachineInstr &DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required
  return -1;
}
bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() ||
          MI.isRegSequenceLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    if (MOReg.isUndef())
      continue;
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}
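
// Illustrative example: for "%d = REG_SEQUENCE %v0, %subreg.sub0, %v1,
// %subreg.sub1", InputRegs receives the pairs (%v0, sub0) and (%v1, sub1),
// each carrying its operand's own sub-register index as well.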
bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() ||
          MI.isExtractSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  // Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  if (MOReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}
bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() ||
          MI.isInsertSubregLike()) &&
         "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  // Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  if (MOInsertedReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}