//===-- TargetInstrInfo.cpp - Target Instruction Information -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
    "disable-sched-hazard", cl::Hidden, cl::init(false),
    cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() = default;

const TargetRegisterClass *
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}
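
// Illustrative usage sketch (editor's note, not from the original file; names
// assumed):
//   if (const TargetRegisterClass *RC =
//           TII->getRegClass(MI.getDesc(), 0, TRI, MF))
//     ...; // operand 0 must be allocated from RC
// A null result means the operand index is out of range or the operand has no
// fixed register class, as with INSERT_SUBREG above.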

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// insertNoops - Insert noops into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoops(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MI,
                                  unsigned Quantity) const {
  for (unsigned i = 0; i < Quantity; ++i)
    insertNoop(MBB, MI);
}

static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
  return strncmp(Str, MAI.getCommentString().data(),
                 MAI.getCommentString().size()) == 0;
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overloaded in the target code to do that.
/// We implement a special case of the .space directive which takes only a
/// single integer argument in base 10 that is the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e. not a logical or arithmetic expression--size values without
/// the optional fill value. This is primarily used for creating arbitrary
/// sized inline asm blocks for testing purposes.
unsigned TargetInstrInfo::getInlineAsmLength(
    const char *Str, const MCAsmInfo &MAI,
    const TargetSubtargetInfo *STI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;
  unsigned Length = 0;
  const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MaxInstLength;
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize;
        SpaceSize = strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}
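
// Worked example (editor's note, illustrative): with MaxInstLength == 4, the
// string "insn1\n.space 512\n" measures as one ordinary instruction (4 bytes)
// plus a successfully parsed .space directive (512 bytes), 516 in total. A
// malformed argument such as ".space 4+4" falls back to MaxInstLength because
// the text after the integer is not a separator, newline, or comment.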

/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Update call site info and remove all the dead instructions
  // from the end of MBB.
  while (Tail != MBB->end()) {
    auto MI = Tail++;
    if (MI->shouldUpdateCallSiteInfo())
      MBB->getParent()->eraseCallSiteInfo(&*MI);
    MBB->erase(MI);
  }

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}
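
// Illustrative before/after (editor's note; MIR-flavored, block numbers
// hypothetical):
//   bb.1:                      ; Tail points at the MOV32ri 2 below
//     $eax = MOV32ri 1
//     $ecx = MOV32ri 2
//     JMP_1 %bb.2
// becomes, with NewDest = %bb.3:
//   bb.1:
//     $eax = MOV32ri 1
//     JMP_1 %bb.3              ; omitted when %bb.3 is the layout successor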

MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
  Register Reg1 = MI.getOperand(Idx1).getReg();
  Register Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // Avoid calling isRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  bool Reg1IsRenamable = Register::isPhysicalRegister(Reg1)
                             ? MI.getOperand(Idx1).isRenamable()
                             : false;
  bool Reg2IsRenamable = Register::isPhysicalRegister(Reg2)
                             ? MI.getOperand(Idx2).isRenamable()
                             : false;
  // If the destination is tied to either of the commuted source registers,
  // then it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  // Avoid calling setIsRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  if (Register::isPhysicalRegister(Reg1))
    CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
  if (Register::isPhysicalRegister(Reg2))
    CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
  return CommutedMI;
}
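
// Illustrative example (editor's note; x86-flavored, hypothetical): for a
// two-address add whose def is tied to the first source operand, commuting
//   $eax = ADD32rr $eax(tied-def 0), $ecx
// swaps the sources and retargets the tied def, yielding
//   $ecx = ADD32rr $ecx(tied-def 0), $eax
// With NewMI == true the rewrite is applied to a clone instead of MI itself.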

MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in the findCommutedOpIndices()
  // method called below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}
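
// Worked example (editor's note, illustrative): with CommutableOpIdx1 == 1 and
// CommutableOpIdx2 == 2, a caller passing ResultIdx1 == CommuteAnyOperandIndex
// and ResultIdx2 == 2 gets ResultIdx1 fixed to 1 and a true result. The fixed
// pair (2, 1) also succeeds because operand order is ignored, while (1, 3)
// returns false.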

bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfo::hasLoadFromStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::hasStoreToStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!SubIdx) {
    Size = TRI->getSpillSize(*RC);
    Offset = 0;
    return true;
  }
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size.
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = TRI->getSpillSize(*RC) - (Offset + Size);
  }
  return true;
}
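
// Worked example (editor's note, illustrative): spilling a 32-bit subregister
// (BitSize 32, BitOffset 0) of a register class with an 8-byte spill size
// yields Size == 4 and Offset == 0 on a little-endian target; a big-endian
// target flips the offset to 8 - (0 + 4) == 4 so the subregister's bytes are
// still addressed correctly within the slot.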

void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    Register DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
    MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
  MachineFunction &MF = *MBB.getParent();
  return MF.cloneMachineInstrBundle(MBB, InsertBefore, Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              unsigned FoldIdx) {
  assert(MI.isCopy() && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  Register FoldReg = FoldOp.getReg();
  Register LiveReg = LiveOp.getReg();

  assert(Register::isVirtualRegister(FoldReg) && "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (Register::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

MCInst TargetInstrInfo::getNop() const { llvm_unreachable("Not implemented"); }

std::pair<unsigned, unsigned>
TargetInstrInfo::getPatchpointUnfoldableRange(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP:
    // StackMapLiveValues are foldable
    return std::make_pair(0, StackMapOpers(&MI).getVarIdx());
  case TargetOpcode::PATCHPOINT:
    // For PatchPoint, the call args are not foldable (even if reported in the
    // stackmap e.g. via anyregcc).
    return std::make_pair(0, PatchPointOpers(&MI).getVarIdx());
  case TargetOpcode::STATEPOINT:
    // For statepoints, fold deopt and gc arguments, but not call arguments.
    return std::make_pair(MI.getNumDefs(), StatepointOpers(&MI).getVarIdx());
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }
}

static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  unsigned NumDefs = 0;
  // getPatchpointUnfoldableRange asserts if MI is not a stackmap, patchpoint,
  // or statepoint.
  std::tie(NumDefs, StartIdx) = TII.getPatchpointUnfoldableRange(MI);

  unsigned DefToFoldIdx = MI.getNumOperands();

  // Return nullptr if any operands requested for folding are not foldable
  // (not part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < NumDefs) {
      assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
      DefToFoldIdx = Op;
    } else if (Op < StartIdx) {
      return nullptr;
    }
    if (MI.getOperand(Op).isTied())
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the meta data, and function arguments.
  for (unsigned i = 0; i < StartIdx; ++i)
    if (i != DefToFoldIdx)
      MIB.add(MI.getOperand(i));

  for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    unsigned TiedTo = e;
    (void)MI.isRegTiedToDefOperand(i, &TiedTo);

    if (is_contained(Ops, i)) {
      assert(TiedTo == e && "Cannot fold tied operands");
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
          MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else {
      MIB.add(MO);
      if (TiedTo < e) {
        assert(TiedTo < NumDefs && "Bad tied operand");
        if (TiedTo > DefToFoldIdx)
          --TiedTo;
        NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
      }
    }
  }
  return NewMI;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS,
                                                 VirtRegMap *VRM) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
  }

  if (NewMI) {
    NewMI->setMemRefs(MF, MI.memoperands());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
        MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                                Flags, MemSize, MFI.getObjectAlign(FI));
    NewMI->addMemOperand(MF, MMO);

    // The pass "x86 speculative load hardening" always attaches symbols to
    // call instructions. We need to copy it from the old instruction.
    NewMI->cloneInstrSymbols(MF, MI);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!MI.isCopy() || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return &*--Pos;
}
*TargetInstrInfo::foldMemoryOperand(MachineInstr
&MI
,
651 ArrayRef
<unsigned> Ops
,
652 MachineInstr
&LoadMI
,
653 LiveIntervals
*LIS
) const {
654 assert(LoadMI
.canFoldAsLoad() && "LoadMI isn't foldable!");
656 for (unsigned OpIdx
: Ops
)
657 assert(MI
.getOperand(OpIdx
).isUse() && "Folding load into def!");
660 MachineBasicBlock
&MBB
= *MI
.getParent();
661 MachineFunction
&MF
= *MBB
.getParent();
663 // Ask the target to do the actual folding.
664 MachineInstr
*NewMI
= nullptr;
667 if ((MI
.getOpcode() == TargetOpcode::STACKMAP
||
668 MI
.getOpcode() == TargetOpcode::PATCHPOINT
||
669 MI
.getOpcode() == TargetOpcode::STATEPOINT
) &&
670 isLoadFromStackSlot(LoadMI
, FrameIndex
)) {
671 // Fold stackmap/patchpoint.
672 NewMI
= foldPatchpoint(MF
, MI
, Ops
, FrameIndex
, *this);
674 NewMI
= &*MBB
.insert(MI
, NewMI
);
676 // Ask the target to do the actual folding.
677 NewMI
= foldMemoryOperandImpl(MF
, MI
, Ops
, MI
, LoadMI
, LIS
);
683 // Copy the memoperands from the load to the folded instruction.
684 if (MI
.memoperands_empty()) {
685 NewMI
->setMemRefs(MF
, LoadMI
.memoperands());
687 // Handle the rare case of folding multiple loads.
688 NewMI
->setMemRefs(MF
, MI
.memoperands());
689 for (MachineInstr::mmo_iterator I
= LoadMI
.memoperands_begin(),
690 E
= LoadMI
.memoperands_end();
692 NewMI
->addMemOperand(MF
, *I
);

bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && Register::isVirtualRegister(Op1.getReg()))
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && Register::isVirtualRegister(Op2.getReg()))
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And they need to be in the trace (otherwise, they won't have a depth).
  return MI1 && MI2 && MI1->getParent() == MBB && MI2->getParent() == MBB;
}

bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned AssocOpcode = Inst.getOpcode();

  // If only one operand has the same opcode and it's the second source
  // operand, the operands must be commuted.
  Commuted = MI1->getOpcode() != AssocOpcode && MI2->getOpcode() == AssocOpcode;
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must also be associative/commutative (this can
  //    be different even for instructions with the same opcode if traits like
  //    fast-math-flags are included).
  // 3. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 4. The previous instruction's result must only be used by Inst.
  return MI1->getOpcode() == AssocOpcode && isAssociativeAndCommutative(*MI1) &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return isAssociativeAndCommutative(Inst) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.
//
// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
    bool DoRegPressureReduce) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}

/// Return true when a code sequence can improve loop throughput.
bool
TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
  return false;
}

/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev,
    MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // This array encodes the operand index for each parameter because the
  // operands may be commuted. Each row corresponds to a pattern value,
  // and each column specifies the index of A, B, X, Y.
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  int Row;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
  default: llvm_unreachable("unexpected MachineCombinerPattern");
  }

  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
  MachineOperand &OpC = Root.getOperand(0);

  Register RegA = OpA.getReg();
  Register RegB = OpB.getReg();
  Register RegX = OpX.getReg();
  Register RegY = OpY.getReg();
  Register RegC = OpC.getReg();

  if (Register::isVirtualRegister(RegA))
    MRI.constrainRegClass(RegA, RC);
  if (Register::isVirtualRegister(RegB))
    MRI.constrainRegClass(RegB, RC);
  if (Register::isVirtualRegister(RegX))
    MRI.constrainRegClass(RegX, RC);
  if (Register::isVirtualRegister(RegY))
    MRI.constrainRegClass(RegY, RC);
  if (Register::isVirtualRegister(RegC))
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  Register NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  unsigned Opcode = Root.getOpcode();
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, Prev.getDebugLoc(), TII->get(Opcode), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY))
          .setMIFlags(Prev.getFlags());
  MachineInstrBuilder MIB2 =
      BuildMI(*MF, Root.getDebugLoc(), TII->get(Opcode), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(true))
          .setMIFlags(Root.getFlags());

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);
}
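
// Illustrative instance (editor's note; MIR-flavored, register numbers
// hypothetical) of pattern REASSOC_AX_BY, i.e. Prev: B = A op X and
// Root: C = B op Y:
//   Prev:  %3 = ADD %0, %1
//   Root:  %4 = ADD %3, %2
// is rewritten as
//   MIB1:  %5 = ADD %1, %2     ; %5 is NewVR
//   MIB2:  %4 = ADD %0, %5
// so the chain feeding %0 and the new (X op Y) can execute in parallel.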

void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();

  // Select the previous instruction in the sequence based on the input
  // pattern.
  MachineInstr *Prev = nullptr;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_XA_BY:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_YB:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
    break;
  default:
    break;
  }

  assert(Prev && "Unknown pattern for machine combiner");

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}

bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
    const MachineInstr &MI) const {
  const MachineFunction &MF = *MI.getMF();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  Register DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (Register::isVirtualRegister(DefReg) && MI.getOperand(0).getSubReg() &&
      MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
      MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad())
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg()) continue;
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (Register::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getMF();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
      TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (!isFrameInstr(MI))
    return 0;

  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));

  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}
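
// Worked example (editor's note, illustrative): on a target whose stack grows
// down, a call sequence "ADJCALLSTACKDOWN 16 ... ADJCALLSTACKUP 16" yields
// +16 for the setup instruction and -16 for the destroy, after alignSPAdjust
// rounds the requested size to the stack alignment.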

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // INLINEASM_BR can jump to another block
  if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::CreateTargetMIHazardRecognizer(
    const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

// Default implementation of getMemOperandWithOffset.
bool TargetInstrInfo::getMemOperandWithOffset(
    const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
    bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const {
  SmallVector<const MachineOperand *, 4> BaseOps;
  unsigned Width;
  if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
                                     Width, TRI) ||
      BaseOps.size() != 1)
    return false;
  BaseOp = BaseOps.front();
  return true;
}

//===----------------------------------------------------------------------===//
//  SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
//  MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                         const MachineInstr &MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI.getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr &DefMI) const {
  if (DefMI.isTransient())
    return 0;
  if (DefMI.mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI.getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
  return 1;
}

unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI,
                                          unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI.mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx) const {
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI.getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

Optional<ParamLoadedValue>
TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
                                     Register Reg) const {
  const MachineFunction *MF = MI.getMF();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
  int64_t Offset;
  bool OffsetIsScalable;

  // To simplify the sub-register handling, verify that we only need to
  // consider physical registers.
  assert(MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoVRegs));

  if (auto DestSrc = isCopyInstr(MI)) {
    Register DestReg = DestSrc->Destination->getReg();

    // If the copy destination is the forwarding reg, describe the forwarding
    // reg using the copy source as the backup location. Example:
    //
    //   x0 = MOV x7
    //   call callee(x0)      ; x0 described as x7
    if (Reg == DestReg)
      return ParamLoadedValue(*DestSrc->Source, Expr);

    // Cases where super- or sub-registers need to be described should
    // be handled by the target's hook implementation.
    assert(!TRI->isSuperOrSubRegisterEq(Reg, DestReg) &&
           "TargetInstrInfo::describeLoadedValue can't describe super- or "
           "sub-regs for copy instructions");
    return None;
  } else if (auto RegImm = isAddImmediate(MI, Reg)) {
    Register SrcReg = RegImm->Reg;
    Offset = RegImm->Imm;
    Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset, Offset);
    return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
  } else if (MI.hasOneMemOperand()) {
    // Only describe memory which provably does not escape the function. As
    // described in llvm.org/PR43343, escaped memory may be clobbered by the
    // callee (or by another thread).
    const auto &TII = MF->getSubtarget().getInstrInfo();
    const MachineFrameInfo &MFI = MF->getFrameInfo();
    const MachineMemOperand *MMO = MI.memoperands()[0];
    const PseudoSourceValue *PSV = MMO->getPseudoValue();

    // If the address points to "special" memory (e.g. a spill slot), it's
    // sufficient to check that it isn't aliased by any high-level IR value.
    if (!PSV || PSV->mayAlias(&MFI))
      return None;

    const MachineOperand *BaseOp;
    if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
                                      TRI))
      return None;

    // FIXME: Scalable offsets are not yet handled in the offset code below.
    if (OffsetIsScalable)
      return None;

    // TODO: Can currently only handle mem instructions with a single define.
    // An example from the x86 target:
    //   ...
    //   DIV64m $rsp, 1, $noreg, 24, $noreg, implicit-def dead $rax, implicit-def $rdx
    //
    if (MI.getNumExplicitDefs() != 1)
      return None;

    // TODO: In what way do we need to take Reg into consideration here?

    SmallVector<uint64_t, 8> Ops;
    DIExpression::appendOffset(Ops, Offset);
    Ops.push_back(dwarf::DW_OP_deref_size);
    Ops.push_back(MMO->getSize());
    Expr = DIExpression::prependOpcodes(Expr, Ops);
    return ParamLoadedValue(*BaseOp, Expr);
  }

  return None;
}
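
// Illustrative examples (editor's note; AArch64-flavored, hypothetical): for
// the forwarded value of x0 across "$x0 = ADDXri $x5, 16, 0", the
// isAddImmediate case describes x0 as x5 plus a DW_OP_plus_uconst 16
// expression; for a reload such as "$x0 = LDRXui $sp, 2" from a non-aliased
// spill slot, the memory case yields an expression dereferencing [sp + 16]
// with DW_OP_deref_size 8.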

/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx,
                                       const MachineInstr &UseMI,
                                       unsigned UseIdx) const {
  unsigned DefClass = DefMI.getDesc().getSchedClass();
  unsigned UseClass = UseMI.getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() ||
          MI.isRegSequenceLike()) && "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    if (MOReg.isUndef())
      continue;
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindexes of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}

bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() ||
          MI.isExtractSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  // Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  if (MOReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() ||
          MI.isInsertSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  // Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  if (MOInsertedReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "The subindex of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

// Returns a MIRPrinter comment for this machine operand.
std::string TargetInstrInfo::createMIROperandComment(
    const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
    const TargetRegisterInfo *TRI) const {

  if (!MI.isInlineAsm())
    return "";

  std::string Flags;
  raw_string_ostream OS(Flags);

  if (OpIdx == InlineAsm::MIOp_ExtraInfo) {
    // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
    unsigned ExtraInfo = Op.getImm();
    bool First = true;
    for (StringRef Info : InlineAsm::getExtraInfoNames(ExtraInfo)) {
      if (!First)
        OS << " ";
      First = false;
      OS << Info;
    }

    return OS.str();
  }

  int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
  if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
    return "";

  assert(Op.isImm() && "Expected flag operand to be an immediate");
  // Pretty print the inline asm operand descriptor.
  unsigned Flag = Op.getImm();
  unsigned Kind = InlineAsm::getKind(Flag);
  OS << InlineAsm::getKindName(Kind);

  unsigned RCID = 0;
  if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
      InlineAsm::hasRegClassConstraint(Flag, RCID)) {
    if (TRI) {
      OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
    } else
      OS << ":RC" << RCID;
  }

  if (InlineAsm::isMemKind(Flag)) {
    unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
    OS << ":" << InlineAsm::getMemConstraintName(MCID);
  }

  unsigned TiedTo = 0;
  if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
    OS << " tiedto:$" << TiedTo;

  return OS.str();
}
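
// Illustrative output (editor's note, hypothetical): for an inline asm flag
// operand that encodes a register definition constrained to GR32, this
// returns a comment like "regdef:GR32"; a memory-constraint operand might
// print "mem:m", and a use tied to output 0 appends " tiedto:$0".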

TargetInstrInfo::PipelinerLoopInfo::~PipelinerLoopInfo() = default;

void TargetInstrInfo::mergeOutliningCandidateAttributes(
    Function &F, std::vector<outliner::Candidate> &Candidates) const {
  // Include target features from an arbitrary candidate for the outlined
  // function. This makes sure the outlined function knows what kinds of
  // instructions are going into it. This is fine, since all parent functions
  // must necessarily support the instructions that are in the outlined region.
  outliner::Candidate &FirstCand = Candidates.front();
  const Function &ParentFn = FirstCand.getMF()->getFunction();
  if (ParentFn.hasFnAttribute("target-features"))
    F.addFnAttr(ParentFn.getFnAttribute("target-features"));
  if (ParentFn.hasFnAttribute("target-cpu"))
    F.addFnAttr(ParentFn.getFnAttribute("target-cpu"));

  // Set nounwind, so we don't generate eh_frame.
  if (llvm::all_of(Candidates, [](const outliner::Candidate &C) {
        return C.getMF()->getFunction().hasFnAttribute(Attribute::NoUnwind);
      }))
    F.addFnAttr(Attribute::NoUnwind);
}

bool TargetInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                             unsigned &Flags) const {
  // Some instrumentations create special TargetOpcodes at the start which
  // expand to special code sequences that must be present.
  auto First = MBB.getFirstNonDebugInstr();
  if (First != MBB.end() &&
      (First->getOpcode() == TargetOpcode::FENTRY_CALL ||
       First->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER))
    return false;

  return true;
}