//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/MachineCombinerPattern.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineScheduler.h"
#include "llvm/CodeGen/MachineTraceMetrics.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
    "disable-sched-hazard", cl::Hidden, cl::init(false),
    cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() = default;

const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return nullptr;

  short RegClass = MCID.operands()[OpNum].RegClass;
  if (MCID.operands()[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return nullptr;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// insertNoops - Insert noops into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoops(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator MI,
                                  unsigned Quantity) const {
  for (unsigned i = 0; i < Quantity; ++i)
    insertNoop(MBB, MI);
}

static bool isAsmComment(const char *Str, const MCAsmInfo &MAI) {
  return strncmp(Str, MAI.getCommentString().data(),
                 MAI.getCommentString().size()) == 0;
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
/// We implement a special case of the .space directive which takes only a
/// single integer argument in base 10 that is the size in bytes. This is a
/// restricted form of the GAS directive in that we only interpret
/// simple--i.e. not a logical or arithmetic expression--size values without
/// the optional fill value. This is primarily used for creating arbitrary
/// sized inline asm blocks for testing purposes.
unsigned TargetInstrInfo::getInlineAsmLength(
    const char *Str, const MCAsmInfo &MAI,
    const TargetSubtargetInfo *STI) const {
  // Count the number of instructions in the asm.
  bool AtInsnStart = true;
  unsigned Length = 0;
  const unsigned MaxInstLength = MAI.getMaxInstLength(STI);
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0) {
      AtInsnStart = true;
    } else if (isAsmComment(Str, MAI)) {
      // Stop counting as an instruction after a comment until the next
      // separator.
      AtInsnStart = false;
    }

    if (AtInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
      unsigned AddLength = MaxInstLength;
      if (strncmp(Str, ".space", 6) == 0) {
        char *EStr;
        int SpaceSize;
        SpaceSize = strtol(Str + 6, &EStr, 10);
        SpaceSize = SpaceSize < 0 ? 0 : SpaceSize;
        while (*EStr != '\n' && isSpace(static_cast<unsigned char>(*EStr)))
          ++EStr;
        if (*EStr == '\0' || *EStr == '\n' ||
            isAsmComment(EStr, MAI)) // Successfully parsed .space argument
          AddLength = SpaceSize;
      }
      Length += AddLength;
      AtInsnStart = false;
    }
  }

  return Length;
}

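// A minimal usage sketch (illustrative, not part of the upstream file; `TII`,
// `MAI`, and `STI` are assumed to be valid objects for the current subtarget):
//
//   unsigned Len = TII->getInlineAsmLength("nop\n.space 16", *MAI, STI);
//   // On a target whose MaxInstLength is 4, this measures 4 + 16 == 20
//   // bytes; comment text and separator strings contribute nothing.
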
/// ReplaceTailWithBranchTo - Delete the instruction OldInst and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Save off the debug loc before erasing the instruction.
  DebugLoc DL = Tail->getDebugLoc();

  // Update call site info and remove all the dead instructions
  // from the end of MBB.
  while (Tail != MBB->end()) {
    auto MI = Tail++;
    if (MI->shouldUpdateCallSiteInfo())
      MBB->getParent()->eraseCallSiteInfo(&*MI);
    MBB->erase(MI);
  }

  // If MBB isn't immediately before NewDest, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    insertBranch(*MBB, NewDest, nullptr, SmallVector<MachineOperand, 0>(), DL);
  MBB->addSuccessor(NewDest);
}

MachineInstr *TargetInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI, unsigned Idx1,
                                                      unsigned Idx2) const {
  const MCInstrDesc &MCID = MI.getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI.getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return nullptr;

  unsigned CommutableOpIdx1 = Idx1; (void)CommutableOpIdx1;
  unsigned CommutableOpIdx2 = Idx2; (void)CommutableOpIdx2;
  assert(findCommutedOpIndices(MI, CommutableOpIdx1, CommutableOpIdx2) &&
         CommutableOpIdx1 == Idx1 && CommutableOpIdx2 == Idx2 &&
         "TargetInstrInfo::CommuteInstructionImpl(): not commutable operands.");
  assert(MI.getOperand(Idx1).isReg() && MI.getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");

  Register Reg0 = HasDef ? MI.getOperand(0).getReg() : Register();
  Register Reg1 = MI.getOperand(Idx1).getReg();
  Register Reg2 = MI.getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI.getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI.getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI.getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI.getOperand(Idx1).isKill();
  bool Reg2IsKill = MI.getOperand(Idx2).isKill();
  bool Reg1IsUndef = MI.getOperand(Idx1).isUndef();
  bool Reg2IsUndef = MI.getOperand(Idx2).isUndef();
  bool Reg1IsInternal = MI.getOperand(Idx1).isInternalRead();
  bool Reg2IsInternal = MI.getOperand(Idx2).isInternalRead();
  // Avoid calling isRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  bool Reg1IsRenamable =
      Reg1.isPhysical() ? MI.getOperand(Idx1).isRenamable() : false;
  bool Reg2IsRenamable =
      Reg2.isPhysical() ? MI.getOperand(Idx2).isRenamable() : false;
  // If destination is tied to either of the commuted source registers, then
  // it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI.getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI.getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  MachineInstr *CommutedMI = nullptr;
  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI.getMF();
    CommutedMI = MF.CloneMachineInstr(&MI);
  } else {
    CommutedMI = &MI;
  }

  if (HasDef) {
    CommutedMI->getOperand(0).setReg(Reg0);
    CommutedMI->getOperand(0).setSubReg(SubReg0);
  }
  CommutedMI->getOperand(Idx2).setReg(Reg1);
  CommutedMI->getOperand(Idx1).setReg(Reg2);
  CommutedMI->getOperand(Idx2).setSubReg(SubReg1);
  CommutedMI->getOperand(Idx1).setSubReg(SubReg2);
  CommutedMI->getOperand(Idx2).setIsKill(Reg1IsKill);
  CommutedMI->getOperand(Idx1).setIsKill(Reg2IsKill);
  CommutedMI->getOperand(Idx2).setIsUndef(Reg1IsUndef);
  CommutedMI->getOperand(Idx1).setIsUndef(Reg2IsUndef);
  CommutedMI->getOperand(Idx2).setIsInternalRead(Reg1IsInternal);
  CommutedMI->getOperand(Idx1).setIsInternalRead(Reg2IsInternal);
  // Avoid calling setIsRenamable for virtual registers since we assert that
  // renamable property is only queried/set for physical registers.
  if (Reg1.isPhysical())
    CommutedMI->getOperand(Idx2).setIsRenamable(Reg1IsRenamable);
  if (Reg2.isPhysical())
    CommutedMI->getOperand(Idx1).setIsRenamable(Reg2IsRenamable);
  return CommutedMI;
}

MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr &MI, bool NewMI,
                                                  unsigned OpIdx1,
                                                  unsigned OpIdx2) const {
  // If OpIdx1 or OpIdx2 is not specified, then this method is free to choose
  // any commutable operand, which is done in findCommutedOpIndices() method
  // called below.
  if ((OpIdx1 == CommuteAnyOperandIndex || OpIdx2 == CommuteAnyOperandIndex) &&
      !findCommutedOpIndices(MI, OpIdx1, OpIdx2)) {
    assert(MI.isCommutable() &&
           "Precondition violation: MI must be commutable.");
    return nullptr;
  }
  return commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

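// Illustrative sketch (hypothetical MIR, not upstream code): for a commutable
// three-address instruction such as "%2 = ADD %0, %1",
//
//   MachineInstr *Commuted = TII->commuteInstruction(MI);
//
// swaps the two source operands in place (NewMI defaults to false), carrying
// each register's kill/undef/internal-read/renamable flags along with it.
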
bool TargetInstrInfo::fixCommutedOpIndices(unsigned &ResultIdx1,
                                           unsigned &ResultIdx2,
                                           unsigned CommutableOpIdx1,
                                           unsigned CommutableOpIdx2) {
  if (ResultIdx1 == CommuteAnyOperandIndex &&
      ResultIdx2 == CommuteAnyOperandIndex) {
    ResultIdx1 = CommutableOpIdx1;
    ResultIdx2 = CommutableOpIdx2;
  } else if (ResultIdx1 == CommuteAnyOperandIndex) {
    if (ResultIdx2 == CommutableOpIdx1)
      ResultIdx1 = CommutableOpIdx2;
    else if (ResultIdx2 == CommutableOpIdx2)
      ResultIdx1 = CommutableOpIdx1;
    else
      return false;
  } else if (ResultIdx2 == CommuteAnyOperandIndex) {
    if (ResultIdx1 == CommutableOpIdx1)
      ResultIdx2 = CommutableOpIdx2;
    else if (ResultIdx1 == CommutableOpIdx2)
      ResultIdx2 = CommutableOpIdx1;
    else
      return false;
  } else
    // Check that the result operand indices match the given commutable
    // operand indices.
    return (ResultIdx1 == CommutableOpIdx1 && ResultIdx2 == CommutableOpIdx2) ||
           (ResultIdx1 == CommutableOpIdx2 && ResultIdx2 == CommutableOpIdx1);

  return true;
}

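// Worked example (hypothetical values; the helper is intended to be called
// from a target's findCommutedOpIndices implementation):
//
//   unsigned Idx1 = TargetInstrInfo::CommuteAnyOperandIndex, Idx2 = 2;
//   bool OK = fixCommutedOpIndices(Idx1, Idx2, /*CommutableOpIdx1=*/1,
//                                  /*CommutableOpIdx2=*/2);
//   // OK == true and Idx1 resolves to 1. Fixed indices that match neither
//   // commutable operand would make this return false instead.
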
bool TargetInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI.isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MCID.isCommutable())
    return false;

  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  unsigned CommutableOpIdx1 = MCID.getNumDefs();
  unsigned CommutableOpIdx2 = CommutableOpIdx1 + 1;
  if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2,
                            CommutableOpIdx1, CommutableOpIdx2))
    return false;

  if (!MI.getOperand(SrcOpIdx1).isReg() || !MI.getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr &MI) const {
  if (!MI.isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI.isBranch() && !MI.isBarrier())
    return true;
  if (!MI.isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  bool MadeChange = false;

  assert(!MI.isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI.getDesc();
  if (!MI.isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI.getNumOperands(); i != e; ++i) {
    if (MCID.operands()[i].isPredicate()) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfo::hasLoadFromStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isLoad() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::hasStoreToStackSlot(
    const MachineInstr &MI,
    SmallVectorImpl<const MachineMemOperand *> &Accesses) const {
  size_t StartSize = Accesses.size();
  for (MachineInstr::mmo_iterator o = MI.memoperands_begin(),
                                  oe = MI.memoperands_end();
       o != oe; ++o) {
    if ((*o)->isStore() &&
        isa_and_nonnull<FixedStackPseudoSourceValue>((*o)->getPseudoValue()))
      Accesses.push_back(*o);
  }
  return Accesses.size() != StartSize;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const MachineFunction &MF) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  if (!SubIdx) {
    Size = TRI->getSpillSize(*RC);
    Offset = 0;
    return true;
  }
  unsigned BitSize = TRI->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size.
  if (BitSize % 8)
    return false;

  int BitOffset = TRI->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(TRI->getSpillSize(*RC) >= (Offset + Size) && "bad subregister range");

  if (!MF.getDataLayout().isLittleEndian()) {
    Offset = TRI->getSpillSize(*RC) - (Offset + Size);
  }
  return true;
}

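// Worked example (illustrative): for a register class with a 16-byte spill
// size and a 4-byte subregister at bit offset 32, BitSize == 32 and
// BitOffset == 32, so Size == 4 and Offset == 4 on a little-endian target;
// on a big-endian target the offset is mirrored to 16 - (4 + 4) == 8.
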
void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    Register DestReg, unsigned SubIdx,
                                    const MachineInstr &Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(&Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool TargetInstrInfo::produceSameValue(const MachineInstr &MI0,
                                       const MachineInstr &MI1,
                                       const MachineRegisterInfo *MRI) const {
  return MI0.isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr &TargetInstrInfo::duplicate(MachineBasicBlock &MBB,
    MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const {
  assert(!Orig.isNotDuplicable() && "Instruction cannot be duplicated");
  MachineFunction &MF = *MBB.getParent();
  return MF.cloneMachineInstrBundle(MBB, InsertBefore, Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr &MI,
                                              const TargetInstrInfo &TII,
                                              unsigned FoldIdx) {
  assert(TII.isCopyInstr(MI) && "MI must be a COPY instruction");
  if (MI.getNumOperands() != 2)
    return nullptr;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI.getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI.getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return nullptr;

  Register FoldReg = FoldOp.getReg();
  Register LiveReg = LiveOp.getReg();

  assert(FoldReg.isVirtual() && "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (LiveOp.getReg().isPhysical())
    return RC->contains(LiveOp.getReg()) ? RC : nullptr;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return nullptr;
}

MCInst TargetInstrInfo::getNop() const { llvm_unreachable("Not implemented"); }

std::pair<unsigned, unsigned>
TargetInstrInfo::getPatchpointUnfoldableRange(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case TargetOpcode::STACKMAP:
    // StackMapLiveValues are foldable
    return std::make_pair(0, StackMapOpers(&MI).getVarIdx());
  case TargetOpcode::PATCHPOINT:
    // For PatchPoint, the call args are not foldable (even if reported in the
    // stackmap e.g. via anyregcc).
    return std::make_pair(0, PatchPointOpers(&MI).getVarIdx());
  case TargetOpcode::STATEPOINT:
    // For statepoints, fold deopt and gc arguments, but not call arguments.
    return std::make_pair(MI.getNumDefs(), StatepointOpers(&MI).getVarIdx());
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }
}

static MachineInstr *foldPatchpoint(MachineFunction &MF, MachineInstr &MI,
                                    ArrayRef<unsigned> Ops, int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  unsigned NumDefs = 0;
  // getPatchpointUnfoldableRange hits llvm_unreachable if MI is not a
  // patchpoint.
  std::tie(NumDefs, StartIdx) = TII.getPatchpointUnfoldableRange(MI);

  unsigned DefToFoldIdx = MI.getNumOperands();

  // Return false if any operands requested for folding are not foldable (not
  // part of the stackmap's live values).
  for (unsigned Op : Ops) {
    if (Op < NumDefs) {
      assert(DefToFoldIdx == MI.getNumOperands() && "Folding multiple defs");
      DefToFoldIdx = Op;
    } else if (Op < StartIdx) {
      return nullptr;
    }
    if (MI.getOperand(Op).isTied())
      return nullptr;
  }

  MachineInstr *NewMI =
      MF.CreateMachineInstr(TII.get(MI.getOpcode()), MI.getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold return, the meta data, and function arguments
  for (unsigned i = 0; i < StartIdx; ++i)
    if (i != DefToFoldIdx)
      MIB.add(MI.getOperand(i));

  for (unsigned i = StartIdx, e = MI.getNumOperands(); i < e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    unsigned TiedTo = e;
    (void)MI.isRegTiedToDefOperand(i, &TiedTo);

    if (is_contained(Ops, i)) {
      assert(TiedTo == e && "Cannot fold tied operands");
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
          MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid =
          TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize, SpillOffset, MF);
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else {
      MIB.add(MO);
      if (TiedTo < e) {
        assert(TiedTo < NumDefs && "Bad tied operand");
        if (TiedTo > DefToFoldIdx)
          --TiedTo;
        NewMI->tieOperands(TiedTo, NewMI->getNumOperands() - 1);
      }
    }
  }
  return NewMI;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops, int FI,
                                                 LiveIntervals *LIS,
                                                 VirtRegMap *VRM) const {
  auto Flags = MachineMemOperand::MONone;
  for (unsigned OpIdx : Ops)
    Flags |= MI.getOperand(OpIdx).isDef() ? MachineMemOperand::MOStore
                                          : MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI.getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // If we're not folding a load into a subreg, the size of the load is the
  // size of the spill slot. But if we are, we need to figure out what the
  // actual load size is.
  int64_t MemSize = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();

  if (Flags & MachineMemOperand::MOStore) {
    MemSize = MFI.getObjectSize(FI);
  } else {
    for (unsigned OpIdx : Ops) {
      int64_t OpSize = MFI.getObjectSize(FI);

      if (auto SubReg = MI.getOperand(OpIdx).getSubReg()) {
        unsigned SubRegSize = TRI->getSubRegIdxSize(SubReg);
        if (SubRegSize > 0 && !(SubRegSize % 8))
          OpSize = SubRegSize / 8;
      }

      MemSize = std::max(MemSize, OpSize);
    }
  }

  assert(MemSize && "Did not expect a zero-sized stack slot");

  MachineInstr *NewMI = nullptr;

  if (MI.getOpcode() == TargetOpcode::STACKMAP ||
      MI.getOpcode() == TargetOpcode::PATCHPOINT ||
      MI.getOpcode() == TargetOpcode::STATEPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
    if (NewMI)
      MBB->insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI, LIS, VRM);
  }

  if (NewMI) {
    NewMI->setMemRefs(MF, MI.memoperands());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
        MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
                                Flags, MemSize, MFI.getObjectAlign(FI));
    NewMI->addMemOperand(MF, MMO);

    // The pass "x86 speculative load hardening" always attaches symbols to
    // call instructions. We need to copy it from the old instruction.
    NewMI->cloneInstrSymbols(MF, MI);

    return NewMI;
  }

  // Straight COPY may fold as load/store.
  if (!isCopyInstr(MI) || Ops.size() != 1)
    return nullptr;

  const TargetRegisterClass *RC = canFoldCopy(MI, *this, Ops[0]);
  if (!RC)
    return nullptr;

  const MachineOperand &MO = MI.getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI,
                        Register());
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI, Register());
  return &*--Pos;
}

MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineInstr &MI,
                                                 ArrayRef<unsigned> Ops,
                                                 MachineInstr &LoadMI,
                                                 LiveIntervals *LIS) const {
  assert(LoadMI.canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned OpIdx : Ops)
    assert(MI.getOperand(OpIdx).isUse() && "Folding load into def!");
#endif

  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = nullptr;
  int FrameIndex = 0;

  if ((MI.getOpcode() == TargetOpcode::STACKMAP ||
       MI.getOpcode() == TargetOpcode::PATCHPOINT ||
       MI.getOpcode() == TargetOpcode::STATEPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
    if (NewMI)
      NewMI = &*MBB.insert(MI, NewMI);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI, LIS);
  }

  if (!NewMI)
    return nullptr;

  // Copy the memoperands from the load to the folded instruction.
  if (MI.memoperands_empty()) {
    NewMI->setMemRefs(MF, LoadMI.memoperands());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MF, MI.memoperands());
    for (MachineInstr::mmo_iterator I = LoadMI.memoperands_begin(),
                                    E = LoadMI.memoperands_end();
         I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

/// transferImplicitOperands - MI is a pseudo-instruction, and the lowered
/// replacement instructions immediately precede it. Copy any implicit
/// operands from MI to the replacement instruction.
static void transferImplicitOperands(MachineInstr *MI,
                                     const TargetRegisterInfo *TRI) {
  MachineBasicBlock::iterator CopyMI = MI;
  --CopyMI;

  Register DstReg = MI->getOperand(0).getReg();
  for (const MachineOperand &MO : MI->implicit_operands()) {
    CopyMI->addOperand(MO);

    // Be conservative about preserving kills when subregister defs are
    // involved. If there was implicit kill of a super-register overlapping the
    // copy result, we would kill the subregisters previous copies defined.
    if (MO.isKill() && TRI->regsOverlap(DstReg, MO.getReg()))
      CopyMI->getOperand(CopyMI->getNumOperands() - 1).setIsKill(false);
  }
}

void TargetInstrInfo::lowerCopy(MachineInstr *MI,
                                const TargetRegisterInfo *TRI) const {
  if (MI->allDefsAreDead()) {
    MI->setDesc(get(TargetOpcode::KILL));
    return;
  }

  MachineOperand &DstMO = MI->getOperand(0);
  MachineOperand &SrcMO = MI->getOperand(1);

  bool IdentityCopy = (SrcMO.getReg() == DstMO.getReg());
  if (IdentityCopy || SrcMO.isUndef()) {
    // No need to insert an identity copy instruction, but replace with a KILL
    // if liveness is changed.
    if (SrcMO.isUndef() || MI->getNumOperands() > 2) {
      // We must make sure the super-register gets killed. Replace the
      // instruction with KILL.
      MI->setDesc(get(TargetOpcode::KILL));
      return;
    }
    // Vanilla identity copy.
    MI->eraseFromParent();
    return;
  }

  copyPhysReg(*MI->getParent(), MI, MI->getDebugLoc(), DstMO.getReg(),
              SrcMO.getReg(), SrcMO.isKill());

  if (MI->getNumOperands() > 2)
    transferImplicitOperands(MI, TRI);
  MI->eraseFromParent();
}

bool TargetInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  const MachineOperand &Op1 = Inst.getOperand(1);
  const MachineOperand &Op2 = Inst.getOperand(2);
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();

  // We need virtual register definitions for the operands that we will
  // reassociate.
  MachineInstr *MI1 = nullptr;
  MachineInstr *MI2 = nullptr;
  if (Op1.isReg() && Op1.getReg().isVirtual())
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && Op2.getReg().isVirtual())
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());

  // And at least one operand must be defined in MBB.
  return MI1 && MI2 && (MI1->getParent() == MBB || MI2->getParent() == MBB);
}

bool TargetInstrInfo::areOpcodesEqualOrInverse(unsigned Opcode1,
                                               unsigned Opcode2) const {
  return Opcode1 == Opcode2 || getInverseOpcode(Opcode1) == Opcode2;
}

bool TargetInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                             bool &Commuted) const {
  const MachineBasicBlock *MBB = Inst.getParent();
  const MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  MachineInstr *MI1 = MRI.getUniqueVRegDef(Inst.getOperand(1).getReg());
  MachineInstr *MI2 = MRI.getUniqueVRegDef(Inst.getOperand(2).getReg());
  unsigned Opcode = Inst.getOpcode();

  // If only one operand has the same or inverse opcode and it's the second
  // source operand, the operands must be commuted.
  Commuted = !areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
             areOpcodesEqualOrInverse(Opcode, MI2->getOpcode());
  if (Commuted)
    std::swap(MI1, MI2);

  // 1. The previous instruction must be the same type as Inst.
  // 2. The previous instruction must also be associative/commutative or be the
  //    inverse of such an operation (this can be different even for
  //    instructions with the same opcode if traits like fast-math-flags are
  //    included).
  // 3. The previous instruction must have virtual register definitions for its
  //    operands in the same basic block as Inst.
  // 4. The previous instruction's result must only be used by Inst.
  return areOpcodesEqualOrInverse(Opcode, MI1->getOpcode()) &&
         (isAssociativeAndCommutative(*MI1) ||
          isAssociativeAndCommutative(*MI1, /* Invert */ true)) &&
         hasReassociableOperands(*MI1, MBB) &&
         MRI.hasOneNonDBGUse(MI1->getOperand(0).getReg());
}

// 1. The operation must be associative and commutative or be the inverse of
//    such an operation.
// 2. The instruction must have virtual register definitions for its
//    operands in the same basic block.
// 3. The instruction must have a reassociable sibling.
bool TargetInstrInfo::isReassociationCandidate(const MachineInstr &Inst,
                                               bool &Commuted) const {
  return (isAssociativeAndCommutative(Inst) ||
          isAssociativeAndCommutative(Inst, /* Invert */ true)) &&
         hasReassociableOperands(Inst, Inst.getParent()) &&
         hasReassociableSibling(Inst, Commuted);
}

// The concept of the reassociation pass is that these operations can benefit
// from this kind of transformation:
//
// A = ? op ?
// B = A op X (Prev)
// C = B op Y (Root)
// -->
// A = ? op ?
// B = X op Y
// C = A op B
//
// breaking the dependency between A and B, allowing them to be executed in
// parallel (or back-to-back in a pipeline) instead of depending on each other.
//
// FIXME: This has the potential to be expensive (compile time) while not
// improving the code at all. Some ways to limit the overhead:
// 1. Track successful transforms; bail out if hit rate gets too low.
// 2. Only enable at -O3 or some other non-default optimization level.
// 3. Pre-screen pattern candidates here: if an operand of the previous
//    instruction is known to not increase the critical path, then don't match
//    that pattern.
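//
// Concrete example (illustrative): given t1 = a + b; t2 = t1 + c;
// t3 = t2 + d, the combiner can rewrite the tail as tmp = c + d;
// t3 = t1 + tmp, so (a + b) and (c + d) execute in parallel and the serial
// chain shrinks from three dependent adds to two.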
bool TargetInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
    bool DoRegPressureReduce) const {
  bool Commute;
  if (isReassociationCandidate(Root, Commute)) {
    // We found a sequence of instructions that may be suitable for a
    // reassociation of operands to increase ILP. Specify each commutation
    // possibility for the Prev instruction in the sequence and let the
    // machine combiner decide if changing the operands is worthwhile.
    if (Commute) {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_YB);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_YB);
    } else {
      Patterns.push_back(MachineCombinerPattern::REASSOC_AX_BY);
      Patterns.push_back(MachineCombinerPattern::REASSOC_XA_BY);
    }
    return true;
  }

  return false;
}

/// Return true when a code sequence can improve loop throughput.
bool
TargetInstrInfo::isThroughputPattern(MachineCombinerPattern Pattern) const {
  return false;
}

std::pair<unsigned, unsigned>
TargetInstrInfo::getReassociationOpcodes(MachineCombinerPattern Pattern,
                                         const MachineInstr &Root,
                                         const MachineInstr &Prev) const {
  bool AssocCommutRoot = isAssociativeAndCommutative(Root);
  bool AssocCommutPrev = isAssociativeAndCommutative(Prev);

  // Early exit if both opcodes are associative and commutative. It's a trivial
  // reassociation when we only change operands order. In this case opcodes are
  // not required to have inverse versions.
  if (AssocCommutRoot && AssocCommutPrev) {
    assert(Root.getOpcode() == Prev.getOpcode() && "Expected to be equal");
    return std::make_pair(Root.getOpcode(), Root.getOpcode());
  }

  // At least one instruction is not associative or commutative.
  // Since we have matched one of the reassociation patterns, we expect that the
  // instructions' opcodes are equal or one of them is the inversion of the
  // other.
  assert(areOpcodesEqualOrInverse(Root.getOpcode(), Prev.getOpcode()) &&
         "Incorrectly matched pattern");
  unsigned AssocCommutOpcode = Root.getOpcode();
  unsigned InverseOpcode = *getInverseOpcode(Root.getOpcode());
  if (!AssocCommutRoot)
    std::swap(AssocCommutOpcode, InverseOpcode);

  // The transformation rule (`+` is any associative and commutative binary
  // operation, `-` is the inverse):
  //
  // (A + X) + Y => A + (X + Y)
  // (A + X) - Y => A + (X - Y)
  // (A - X) + Y => A - (X - Y)
  // (A - X) - Y => A - (X + Y)
  //
  // (X + A) + Y => (X + Y) + A
  // (X + A) - Y => (X - Y) + A
  // (X - A) + Y => (X + Y) - A
  // (X - A) - Y => (X - Y) - A
  //
  // Y + (A + X) => (Y + X) + A
  // Y - (A + X) => (Y - X) - A
  // Y + (A - X) => (Y - X) + A
  // Y - (A - X) => (Y + X) - A
  //
  // Y + (X + A) => (Y + X) + A
  // Y - (X + A) => (Y - X) - A
  // Y + (X - A) => (Y + X) - A
  // Y - (X - A) => (Y - X) + A
  switch (Pattern) {
  default:
    llvm_unreachable("Unexpected pattern");
  case MachineCombinerPattern::REASSOC_AX_BY:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    break;
  case MachineCombinerPattern::REASSOC_XA_BY:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    break;
  case MachineCombinerPattern::REASSOC_XA_YB:
    if (!AssocCommutRoot && AssocCommutPrev)
      return {InverseOpcode, InverseOpcode};
    if (AssocCommutRoot && !AssocCommutPrev)
      return {InverseOpcode, AssocCommutOpcode};
    if (!AssocCommutRoot && !AssocCommutPrev)
      return {AssocCommutOpcode, InverseOpcode};
    break;
  }
  llvm_unreachable("Unhandled combination");
}

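// Worked example (illustrative): if Root is FSUB and Prev is FADD matched as
// REASSOC_AX_BY -- i.e. C = (A + X) - Y -- the rule
// "(A + X) - Y => A + (X - Y)" applies: AssocCommutRoot is false, so the two
// opcodes swap and the result is {NewRootOpc = FADD, NewPrevOpc = FSUB}.
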
// Return a pair of boolean flags showing if the new root and new prev operands
// must be swapped. See visual example of the rule in
// TargetInstrInfo::getReassociationOpcodes.
static std::pair<bool, bool> mustSwapOperands(MachineCombinerPattern Pattern) {
  switch (Pattern) {
  default:
    llvm_unreachable("Unexpected pattern");
  case MachineCombinerPattern::REASSOC_AX_BY:
    return {false, false};
  case MachineCombinerPattern::REASSOC_XA_BY:
    return {true, false};
  case MachineCombinerPattern::REASSOC_AX_YB:
    return {true, true};
  case MachineCombinerPattern::REASSOC_XA_YB:
    return {true, true};
  }
}

/// Attempt the reassociation transformation to reduce critical path length.
/// See the above comments before getMachineCombinerPatterns().
void TargetInstrInfo::reassociateOps(
    MachineInstr &Root, MachineInstr &Prev,
    MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineFunction *MF = Root.getMF();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  const TargetRegisterClass *RC = Root.getRegClassConstraint(0, TII, TRI);

  // This array encodes the operand index for each parameter because the
  // operands may be commuted. Each row corresponds to a pattern value,
  // and each column specifies the index of A, B, X, Y.
  unsigned OpIdx[4][4] = {
    { 1, 1, 2, 2 },
    { 1, 2, 2, 1 },
    { 2, 1, 1, 2 },
    { 2, 2, 1, 1 }
  };

  int Row;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY: Row = 0; break;
  case MachineCombinerPattern::REASSOC_AX_YB: Row = 1; break;
  case MachineCombinerPattern::REASSOC_XA_BY: Row = 2; break;
  case MachineCombinerPattern::REASSOC_XA_YB: Row = 3; break;
  default: llvm_unreachable("unexpected MachineCombinerPattern");
  }

  MachineOperand &OpA = Prev.getOperand(OpIdx[Row][0]);
  MachineOperand &OpB = Root.getOperand(OpIdx[Row][1]);
  MachineOperand &OpX = Prev.getOperand(OpIdx[Row][2]);
  MachineOperand &OpY = Root.getOperand(OpIdx[Row][3]);
  MachineOperand &OpC = Root.getOperand(0);

  Register RegA = OpA.getReg();
  Register RegB = OpB.getReg();
  Register RegX = OpX.getReg();
  Register RegY = OpY.getReg();
  Register RegC = OpC.getReg();

  if (RegA.isVirtual())
    MRI.constrainRegClass(RegA, RC);
  if (RegB.isVirtual())
    MRI.constrainRegClass(RegB, RC);
  if (RegX.isVirtual())
    MRI.constrainRegClass(RegX, RC);
  if (RegY.isVirtual())
    MRI.constrainRegClass(RegY, RC);
  if (RegC.isVirtual())
    MRI.constrainRegClass(RegC, RC);

  // Create a new virtual register for the result of (X op Y) instead of
  // recycling RegB because the MachineCombiner's computation of the critical
  // path requires a new register definition rather than an existing one.
  Register NewVR = MRI.createVirtualRegister(RC);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));

  auto [NewRootOpc, NewPrevOpc] = getReassociationOpcodes(Pattern, Root, Prev);
  bool KillA = OpA.isKill();
  bool KillX = OpX.isKill();
  bool KillY = OpY.isKill();
  bool KillNewVR = true;

  auto [SwapRootOperands, SwapPrevOperands] = mustSwapOperands(Pattern);

  if (SwapPrevOperands) {
    std::swap(RegX, RegY);
    std::swap(KillX, KillY);
  }

  // Create new instructions for insertion.
  MachineInstrBuilder MIB1 =
      BuildMI(*MF, MIMetadata(Prev), TII->get(NewPrevOpc), NewVR)
          .addReg(RegX, getKillRegState(KillX))
          .addReg(RegY, getKillRegState(KillY))
          .setMIFlags(Prev.getFlags());

  if (SwapRootOperands) {
    std::swap(RegA, NewVR);
    std::swap(KillA, KillNewVR);
  }

  MachineInstrBuilder MIB2 =
      BuildMI(*MF, MIMetadata(Root), TII->get(NewRootOpc), RegC)
          .addReg(RegA, getKillRegState(KillA))
          .addReg(NewVR, getKillRegState(KillNewVR))
          .setMIFlags(Root.getFlags());

  setSpecialOperandAttr(Root, Prev, *MIB1, *MIB2);

  // Record new instructions for insertion and old instructions for deletion.
  InsInstrs.push_back(MIB1);
  InsInstrs.push_back(MIB2);
  DelInstrs.push_back(&Prev);
  DelInstrs.push_back(&Root);

  // We transformed:
  // B = A op X (Prev)
  // C = B op Y (Root)
  // Into:
  // B = X op Y (MIB1)
  // C = A op B (MIB2)
  // C has the same value as before, B doesn't; as such, keep the debug number
  // of C but not of B.
  if (unsigned OldRootNum = Root.peekDebugInstrNum())
    MIB2.getInstr()->setDebugInstrNum(OldRootNum);
}

void TargetInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();

  // Select the previous instruction in the sequence based on the input pattern.
  MachineInstr *Prev = nullptr;
  switch (Pattern) {
  case MachineCombinerPattern::REASSOC_AX_BY:
  case MachineCombinerPattern::REASSOC_XA_BY:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(1).getReg());
    break;
  case MachineCombinerPattern::REASSOC_AX_YB:
  case MachineCombinerPattern::REASSOC_XA_YB:
    Prev = MRI.getUniqueVRegDef(Root.getOperand(2).getReg());
    break;
  default:
    llvm_unreachable("Unknown pattern for machine combiner");
  }

  // Don't reassociate if Prev and Root are in different blocks.
  if (Prev->getParent() != Root.getParent())
    return;

  reassociateOps(Root, *Prev, Pattern, InsInstrs, DelInstrs, InstIdxForVirtReg);
}

MachineTraceStrategy TargetInstrInfo::getMachineCombinerTraceStrategy() const {
  return MachineTraceStrategy::TS_MinInstrCount;
}

bool TargetInstrInfo::isReallyTriviallyReMaterializableGeneric(
    const MachineInstr &MI) const {
  const MachineFunction &MF = *MI.getMF();
  const MachineRegisterInfo &MRI = MF.getRegInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI.getNumOperands() || !MI.getOperand(0).isReg())
    return false;
  Register DefReg = MI.getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // rematerialized.
  if (DefReg.isVirtual() && MI.getOperand(0).getSubReg() &&
      MI.readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo().isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI.isNotDuplicable() || MI.mayStore() || MI.mayRaiseFPException() ||
      MI.hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI.isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI.mayLoad() && !MI.isDereferenceableInvariantLoad())
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg()) continue;
    Register Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (Reg.isPhysical()) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

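// Illustrative contrast (hypothetical MIR): an immediate materialization such
// as "%0 = MOVi 42" passes every check above (no side effects, no register
// uses), while "%0 = LOAD %1, 0" fails both the invariant-load test and the
// no-virtual-register-uses rule, so it is not trivially rematerializable.
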
int TargetInstrInfo::getSPAdjust(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getMF();
  const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
  bool StackGrowsDown =
      TFI->getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown;

  unsigned FrameSetupOpcode = getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = getCallFrameDestroyOpcode();

  if (!isFrameInstr(MI))
    return 0;

  int SPAdj = TFI->alignSPAdjust(getFrameSize(MI));

  if ((!StackGrowsDown && MI.getOpcode() == FrameSetupOpcode) ||
      (StackGrowsDown && MI.getOpcode() == FrameDestroyOpcode))
    SPAdj = -SPAdj;

  return SPAdj;
}

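// Worked example (illustrative; ADJCALLSTACKDOWN/UP stand in for the target's
// frame pseudos): on a stack-grows-down target with a 16-byte call frame, the
// setup instruction yields +16 and the matching destroy yields -16, so the
// running SP adjustment returns to zero after the call sequence.
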
/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr &MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI.isTerminator() || MI.isPosition())
    return true;

  // INLINEASM_BR can jump to another block
  if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getSubtarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  return MI.modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI);
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::CreateTargetMIHazardRecognizer(
    const InstrItineraryData *II, const ScheduleDAGMI *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "machine-scheduler");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

// Default implementation of getMemOperandWithOffset.
bool TargetInstrInfo::getMemOperandWithOffset(
    const MachineInstr &MI, const MachineOperand *&BaseOp, int64_t &Offset,
    bool &OffsetIsScalable, const TargetRegisterInfo *TRI) const {
  SmallVector<const MachineOperand *, 4> BaseOps;
  unsigned Width;
  if (!getMemOperandsWithOffsetWidth(MI, BaseOps, Offset, OffsetIsScalable,
                                     Width, TRI) ||
      BaseOps.size() != 1)
    return false;
  BaseOp = BaseOps.front();
  return true;
}

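// A minimal usage sketch (illustrative; `TII`, `MI`, and `TRI` are assumed
// valid):
//
//   const MachineOperand *BaseOp;
//   int64_t Offset;
//   bool OffsetIsScalable;
//   if (TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
//                                    TRI))
//     ; // MI has exactly one base operand, addressing [BaseOp + Offset].
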
//===----------------------------------------------------------------------===//
// SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
// MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                         const MachineInstr &MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI.getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel &SchedModel,
                                            const MachineInstr &DefMI) const {
  if (DefMI.isTransient())
    return 0;
  if (DefMI.mayLoad())
    return SchedModel.LoadLatency;
  if (isHighLatencyDef(DefMI.getOpcode()))
    return SchedModel.HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr &) const {
  return 1;
}

unsigned TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                          const MachineInstr &MI,
                                          unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI.mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI.getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const TargetSchedModel &SchedModel,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx) const {
  const InstrItineraryData *ItinData = SchedModel.getInstrItineraries();
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI.getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

std::optional<ParamLoadedValue>
TargetInstrInfo::describeLoadedValue(const MachineInstr &MI,
                                     Register Reg) const {
  const MachineFunction *MF = MI.getMF();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  DIExpression *Expr = DIExpression::get(MF->getFunction().getContext(), {});
  int64_t Offset;
  bool OffsetIsScalable;

  // To simplify the sub-register handling, verify that we only need to
  // consider physical registers.
  assert(MF->getProperties().hasProperty(
      MachineFunctionProperties::Property::NoVRegs));

  if (auto DestSrc = isCopyInstr(MI)) {
    Register DestReg = DestSrc->Destination->getReg();

    // If the copy destination is the forwarding reg, describe the forwarding
    // reg using the copy source as the backup location. Example:
    //
    //   x0 = MOV x7
    //   call callee(x0)      ; x0 described as x7
    if (Reg == DestReg)
      return ParamLoadedValue(*DestSrc->Source, Expr);

    // If the target's hook couldn't describe this copy, give up.
    return std::nullopt;
  } else if (auto RegImm = isAddImmediate(MI, Reg)) {
    Register SrcReg = RegImm->Reg;
    Offset = RegImm->Imm;
    Expr = DIExpression::prepend(Expr, DIExpression::ApplyOffset, Offset);
    return ParamLoadedValue(MachineOperand::CreateReg(SrcReg, false), Expr);
  } else if (MI.hasOneMemOperand()) {
    // Only describe memory which provably does not escape the function. As
    // described in llvm.org/PR43343, escaped memory may be clobbered by the
    // callee (or by another thread).
    const auto &TII = MF->getSubtarget().getInstrInfo();
    const MachineFrameInfo &MFI = MF->getFrameInfo();
    const MachineMemOperand *MMO = MI.memoperands()[0];
    const PseudoSourceValue *PSV = MMO->getPseudoValue();

    // If the address points to "special" memory (e.g. a spill slot), it's
    // sufficient to check that it isn't aliased by any high-level IR value.
    if (!PSV || PSV->mayAlias(&MFI))
      return std::nullopt;

    const MachineOperand *BaseOp;
    if (!TII->getMemOperandWithOffset(MI, BaseOp, Offset, OffsetIsScalable,
                                      TRI))
      return std::nullopt;

    // FIXME: Scalable offsets are not yet handled in the offset code below.
    if (OffsetIsScalable)
      return std::nullopt;

    // TODO: Can currently only handle mem instructions with a single define.
    // An example from the x86 target:
    //    ...
    //    DIV64m $rsp, 1, $noreg, 24, $noreg, implicit-def dead $rax, implicit-def $rdx
    //    ...
    //
    if (MI.getNumExplicitDefs() != 1)
      return std::nullopt;

    // TODO: In what way do we need to take Reg into consideration here?

    SmallVector<uint64_t, 8> Ops;
    DIExpression::appendOffset(Ops, Offset);
    Ops.push_back(dwarf::DW_OP_deref_size);
    Ops.push_back(MMO->getSize());
    Expr = DIExpression::prependOpcodes(Expr, Ops);
    return ParamLoadedValue(*BaseOp, Expr);
  }

  return std::nullopt;
}

/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr &DefMI,
                                       unsigned DefIdx,
                                       const MachineInstr &UseMI,
                                       unsigned UseIdx) const {
  unsigned DefClass = DefMI.getDesc().getSchedClass();
  unsigned UseClass = UseMI.getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

bool TargetInstrInfo::getRegSequenceInputs(
    const MachineInstr &MI, unsigned DefIdx,
    SmallVectorImpl<RegSubRegPairAndIdx> &InputRegs) const {
  assert((MI.isRegSequence() ||
          MI.isRegSequenceLike()) && "Instruction does not have the proper type");

  if (!MI.isRegSequence())
    return getRegSequenceLikeInputs(MI, DefIdx, InputRegs);

  // We are looking at:
  // Def = REG_SEQUENCE v0, sub0, v1, sub1, ...
  assert(DefIdx == 0 && "REG_SEQUENCE only has one def");
  for (unsigned OpIdx = 1, EndOpIdx = MI.getNumOperands(); OpIdx != EndOpIdx;
       OpIdx += 2) {
    const MachineOperand &MOReg = MI.getOperand(OpIdx);
    if (MOReg.isUndef())
      continue;
    const MachineOperand &MOSubIdx = MI.getOperand(OpIdx + 1);
    assert(MOSubIdx.isImm() &&
           "One of the subindices of the reg_sequence is not an immediate");
    // Record Reg:SubReg, SubIdx.
    InputRegs.push_back(RegSubRegPairAndIdx(MOReg.getReg(), MOReg.getSubReg(),
                                            (unsigned)MOSubIdx.getImm()));
  }
  return true;
}

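// Illustrative decomposition (hypothetical MIR): for
// "%d = REG_SEQUENCE %v0, %subreg.sub0, %v1, %subreg.sub1", InputRegs
// receives (%v0, 0, sub0) and (%v1, 0, sub1) -- one RegSubRegPairAndIdx per
// (register, subindex) pair, skipping undef inputs.
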
bool TargetInstrInfo::getExtractSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPairAndIdx &InputReg) const {
  assert((MI.isExtractSubreg() ||
          MI.isExtractSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isExtractSubreg())
    return getExtractSubregLikeInputs(MI, DefIdx, InputReg);

  // We are looking at:
  // Def = EXTRACT_SUBREG v0.sub1, sub0.
  assert(DefIdx == 0 && "EXTRACT_SUBREG only has one def");
  const MachineOperand &MOReg = MI.getOperand(1);
  if (MOReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(2);
  assert(MOSubIdx.isImm() &&
         "The subindex of the extract_subreg is not an immediate");

  InputReg.Reg = MOReg.getReg();
  InputReg.SubReg = MOReg.getSubReg();
  InputReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

bool TargetInstrInfo::getInsertSubregInputs(
    const MachineInstr &MI, unsigned DefIdx,
    RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const {
  assert((MI.isInsertSubreg() ||
          MI.isInsertSubregLike()) && "Instruction does not have the proper type");

  if (!MI.isInsertSubreg())
    return getInsertSubregLikeInputs(MI, DefIdx, BaseReg, InsertedReg);

  // We are looking at:
  // Def = INSERT_SUBREG v0, v1, sub0.
  assert(DefIdx == 0 && "INSERT_SUBREG only has one def");
  const MachineOperand &MOBaseReg = MI.getOperand(1);
  const MachineOperand &MOInsertedReg = MI.getOperand(2);
  if (MOInsertedReg.isUndef())
    return false;
  const MachineOperand &MOSubIdx = MI.getOperand(3);
  assert(MOSubIdx.isImm() &&
         "One of the subindices of the insert_subreg is not an immediate");
  BaseReg.Reg = MOBaseReg.getReg();
  BaseReg.SubReg = MOBaseReg.getSubReg();

  InsertedReg.Reg = MOInsertedReg.getReg();
  InsertedReg.SubReg = MOInsertedReg.getSubReg();
  InsertedReg.SubIdx = (unsigned)MOSubIdx.getImm();
  return true;
}

// Returns a MIRPrinter comment for this machine operand.
std::string TargetInstrInfo::createMIROperandComment(
    const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
    const TargetRegisterInfo *TRI) const {

  if (!MI.isInlineAsm())
    return "";

  std::string Flags;
  raw_string_ostream OS(Flags);

  if (OpIdx == InlineAsm::MIOp_ExtraInfo) {
    // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
    unsigned ExtraInfo = Op.getImm();
    bool First = true;
    for (StringRef Info : InlineAsm::getExtraInfoNames(ExtraInfo)) {
      if (!First)
        OS << " ";
      First = false;
      OS << Info;
    }

    return OS.str();
  }

  int FlagIdx = MI.findInlineAsmFlagIdx(OpIdx);
  if (FlagIdx < 0 || (unsigned)FlagIdx != OpIdx)
    return "";

  assert(Op.isImm() && "Expected flag operand to be an immediate");
  // Pretty print the inline asm operand descriptor.
  unsigned Flag = Op.getImm();
  unsigned Kind = InlineAsm::getKind(Flag);
  OS << InlineAsm::getKindName(Kind);

  unsigned RCID = 0;
  if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) &&
      InlineAsm::hasRegClassConstraint(Flag, RCID)) {
    if (TRI) {
      OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
    } else
      OS << ":RC" << RCID;
  }

  if (InlineAsm::isMemKind(Flag)) {
    unsigned MCID = InlineAsm::getMemoryConstraintID(Flag);
    OS << ":" << InlineAsm::getMemConstraintName(MCID);
  }

  unsigned TiedTo = 0;
  if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo))
    OS << " tiedto:$" << TiedTo;

  return OS.str();
}

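// Example output (illustrative; exact names depend on the flag word): a
// register-use operand constrained to GR64 and tied to def 0 would print a
// comment along the lines of "reguse:GR64 tiedto:$0".
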
TargetInstrInfo::PipelinerLoopInfo::~PipelinerLoopInfo() = default;

void TargetInstrInfo::mergeOutliningCandidateAttributes(
    Function &F, std::vector<outliner::Candidate> &Candidates) const {
  // Include target features from an arbitrary candidate for the outlined
  // function. This makes sure the outlined function knows what kinds of
  // instructions are going into it. This is fine, since all parent functions
  // must necessarily support the instructions that are in the outlined region.
  outliner::Candidate &FirstCand = Candidates.front();
  const Function &ParentFn = FirstCand.getMF()->getFunction();
  if (ParentFn.hasFnAttribute("target-features"))
    F.addFnAttr(ParentFn.getFnAttribute("target-features"));
  if (ParentFn.hasFnAttribute("target-cpu"))
    F.addFnAttr(ParentFn.getFnAttribute("target-cpu"));

  // Set nounwind, so we don't generate eh_frame.
  if (llvm::all_of(Candidates, [](const outliner::Candidate &C) {
        return C.getMF()->getFunction().hasFnAttribute(Attribute::NoUnwind);
      }))
    F.addFnAttr(Attribute::NoUnwind);
}

outliner::InstrType TargetInstrInfo::getOutliningType(
    MachineBasicBlock::iterator &MIT, unsigned Flags) const {
  MachineInstr &MI = *MIT;

  // NOTE: MI.isMetaInstruction() will match CFI_INSTRUCTION, but some targets
  // have support for outlining those. Special-case that here.
  if (MI.isCFIInstruction())
    // Just go right to the target implementation.
    return getOutliningTypeImpl(MIT, Flags);

  // Be conservative about inline assembly.
  if (MI.isInlineAsm())
    return outliner::InstrType::Illegal;

  // Labels generally can't safely be outlined.
  if (MI.isLabel())
    return outliner::InstrType::Illegal;

  // Don't let debug instructions impact analysis.
  if (MI.isDebugInstr())
    return outliner::InstrType::Invisible;

  // Some other special cases.
  switch (MI.getOpcode()) {
  case TargetOpcode::IMPLICIT_DEF:
  case TargetOpcode::KILL:
  case TargetOpcode::LIFETIME_START:
  case TargetOpcode::LIFETIME_END:
    return outliner::InstrType::Invisible;
  default:
    break;
  }

  // Is this a terminator for a basic block?
  if (MI.isTerminator()) {
    // If this is a branch to another block, we can't outline it.
    if (!MI.getParent()->succ_empty())
      return outliner::InstrType::Illegal;

    // Don't outline if the branch is not unconditional.
    if (isPredicated(MI))
      return outliner::InstrType::Illegal;
  }

  // Make sure none of the operands of this instruction do anything that
  // might break if they're moved outside their current function.
  // This includes MachineBasicBlock references, BlockAddresses,
  // Constant pool indices and jump table indices.
  //
  // A quick note on MO_TargetIndex:
  // This doesn't seem to be used in any of the architectures that the
  // MachineOutliner supports, but it was still filtered out in all of them.
  // There was one exception (RISC-V), but MO_TargetIndex also isn't used there.
  // As such, this check is removed both here and in the target-specific
  // implementations. Instead, we assert to make sure this doesn't
  // catch anyone off-guard somewhere down the line.
  for (const MachineOperand &MOP : MI.operands()) {
    // If you hit this assertion, please remove it and adjust
    // `getOutliningTypeImpl` for your target appropriately if necessary.
    // Adding the assertion back to other supported architectures
    // would be nice too :)
    assert(!MOP.isTargetIndex() && "This isn't used quite yet!");

    // CFI instructions should already have been filtered out at this point.
    assert(!MOP.isCFIIndex() && "CFI instructions handled elsewhere!");

    // PrologEpilogInserter should've already run at this point.
    assert(!MOP.isFI() && "FrameIndex instructions should be gone by now!");

    if (MOP.isMBB() || MOP.isBlockAddress() || MOP.isCPI() || MOP.isJTI())
      return outliner::InstrType::Illegal;
  }

  // If we don't know, delegate to the target-specific hook.
  return getOutliningTypeImpl(MIT, Flags);
}

bool TargetInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                             unsigned &Flags) const {
  // Some instrumentations create special TargetOpcode at the start which
  // expands to special code sequences which must be present.
  auto First = MBB.getFirstNonDebugInstr();
  if (First == MBB.end())
    return true;

  if (First->getOpcode() == TargetOpcode::FENTRY_CALL ||
      First->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER)
    return false;

  // Some instrumentations create special pseudo-instructions at or just before
  // the end that must be present.
  auto Last = MBB.getLastNonDebugInstr();
  if (Last->getOpcode() == TargetOpcode::PATCHABLE_RET ||
      Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
    return false;

  if (Last != First && Last->isReturn()) {
    --Last;
    if (Last->getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_EXIT ||
        Last->getOpcode() == TargetOpcode::PATCHABLE_TAIL_CALL)
      return false;
  }
  return true;
}