//===- lib/CodeGen/MachineInstr.cpp ---------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Methods common to all machine instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/LiveRegUnits.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/IR/Operator.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>

using namespace llvm;

static const MachineFunction *getMFIfAvailable(const MachineInstr &MI) {
  if (const MachineBasicBlock *MBB = MI.getParent())
    if (const MachineFunction *MF = MBB->getParent())
      return MF;
  return nullptr;
}

// Try to crawl up to the machine function and get TRI and IntrinsicInfo from
// it.
static void tryToGetTargetInfo(const MachineInstr &MI,
                               const TargetRegisterInfo *&TRI,
                               const MachineRegisterInfo *&MRI,
                               const TargetIntrinsicInfo *&IntrinsicInfo,
                               const TargetInstrInfo *&TII) {

  if (const MachineFunction *MF = getMFIfAvailable(MI)) {
    TRI = MF->getSubtarget().getRegisterInfo();
    MRI = &MF->getRegInfo();
    IntrinsicInfo = MF->getTarget().getIntrinsicInfo();
    TII = MF->getSubtarget().getInstrInfo();
  }
}

void MachineInstr::addImplicitDefUseOperands(MachineFunction &MF) {
  for (MCPhysReg ImpDef : MCID->implicit_defs())
    addOperand(MF, MachineOperand::CreateReg(ImpDef, true, true));
  for (MCPhysReg ImpUse : MCID->implicit_uses())
    addOperand(MF, MachineOperand::CreateReg(ImpUse, false, true));
}

/// MachineInstr ctor - This constructor creates a MachineInstr and adds the
/// implicit operands. It reserves space for the number of operands specified by
/// the MCInstrDesc.
MachineInstr::MachineInstr(MachineFunction &MF, const MCInstrDesc &TID,
                           DebugLoc DL, bool NoImp)
    : MCID(&TID), NumOperands(0), Flags(0), AsmPrinterFlags(0),
      DbgLoc(std::move(DL)), DebugInstrNum(0), Opcode(TID.Opcode) {
  assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");

  // Reserve space for the expected number of operands.
  if (unsigned NumOps = MCID->getNumOperands() + MCID->implicit_defs().size() +
                        MCID->implicit_uses().size()) {
    CapOperands = OperandCapacity::get(NumOps);
    Operands = MF.allocateOperandArray(CapOperands);
  }

  if (!NoImp)
    addImplicitDefUseOperands(MF);
}

/// MachineInstr ctor - Copies MachineInstr arg exactly.
/// Does not copy the number from debug instruction numbering, to preserve
/// uniqueness.
MachineInstr::MachineInstr(MachineFunction &MF, const MachineInstr &MI)
    : MCID(&MI.getDesc()), NumOperands(0), Flags(0), AsmPrinterFlags(0),
      Info(MI.Info), DbgLoc(MI.getDebugLoc()), DebugInstrNum(0),
      Opcode(MI.getOpcode()) {
  assert(DbgLoc.hasTrivialDestructor() && "Expected trivial destructor");

  CapOperands = OperandCapacity::get(MI.getNumOperands());
  Operands = MF.allocateOperandArray(CapOperands);

  // Copy operands.
  for (const MachineOperand &MO : MI.operands())
    addOperand(MF, MO);

  // Replicate ties between the operands, which addOperand was not
  // able to do reliably.
  for (unsigned i = 0, e = getNumOperands(); i < e; ++i) {
    MachineOperand &NewMO = getOperand(i);
    const MachineOperand &OrigMO = MI.getOperand(i);
    NewMO.TiedTo = OrigMO.TiedTo;
  }

  // Copy all the sensible flags.
  setFlags(MI.Flags);
}

void MachineInstr::setDesc(const MCInstrDesc &TID) {
  if (getParent())
    getMF()->handleChangeDesc(*this, TID);
  MCID = &TID;
  Opcode = TID.Opcode;
}

void MachineInstr::moveBefore(MachineInstr *MovePos) {
  MovePos->getParent()->splice(MovePos, getParent(), getIterator());
}

/// getRegInfo - If this instruction is embedded into a MachineFunction,
/// return the MachineRegisterInfo object for the current function, otherwise
/// return null.
MachineRegisterInfo *MachineInstr::getRegInfo() {
  if (MachineBasicBlock *MBB = getParent())
    return &MBB->getParent()->getRegInfo();
  return nullptr;
}

const MachineRegisterInfo *MachineInstr::getRegInfo() const {
  if (const MachineBasicBlock *MBB = getParent())
    return &MBB->getParent()->getRegInfo();
  return nullptr;
}

void MachineInstr::removeRegOperandsFromUseLists(MachineRegisterInfo &MRI) {
  for (MachineOperand &MO : operands())
    if (MO.isReg())
      MRI.removeRegOperandFromUseList(&MO);
}

void MachineInstr::addRegOperandsToUseLists(MachineRegisterInfo &MRI) {
  for (MachineOperand &MO : operands())
    if (MO.isReg())
      MRI.addRegOperandToUseList(&MO);
}

void MachineInstr::addOperand(const MachineOperand &Op) {
  MachineBasicBlock *MBB = getParent();
  assert(MBB && "Use MachineInstrBuilder to add operands to dangling instrs");
  MachineFunction *MF = MBB->getParent();
  assert(MF && "Use MachineInstrBuilder to add operands to dangling instrs");
  addOperand(*MF, Op);
}

/// Move NumOps MachineOperands from Src to Dst, with support for overlapping
/// ranges. If MRI is non-null also update use-def chains.
static void moveOperands(MachineOperand *Dst, MachineOperand *Src,
                         unsigned NumOps, MachineRegisterInfo *MRI) {
  if (MRI)
    return MRI->moveOperands(Dst, Src, NumOps);
  // MachineOperand is a trivially copyable type so we can just use memmove.
  assert(Dst && Src && "Unknown operands");
  std::memmove(Dst, Src, NumOps * sizeof(MachineOperand));
}

/// addOperand - Add the specified operand to the instruction. If it is an
/// implicit operand, it is added to the end of the operand list. If it is
/// an explicit operand it is added at the end of the explicit operand list
/// (before the first implicit operand).
void MachineInstr::addOperand(MachineFunction &MF, const MachineOperand &Op) {
  assert(isUInt<LLVM_MI_NUMOPERANDS_BITS>(NumOperands + 1) &&
         "Cannot add more operands.");
  assert(MCID && "Cannot add operands before providing an instr descriptor");

  // Check if we're adding one of our existing operands.
  if (&Op >= Operands && &Op < Operands + NumOperands) {
    // This is unusual: MI->addOperand(MI->getOperand(i)).
    // If adding Op requires reallocating or moving existing operands around,
    // the Op reference could go stale. Support it by copying Op.
    MachineOperand CopyOp(Op);
    return addOperand(MF, CopyOp);
  }

  // Find the insert location for the new operand. Implicit registers go at
  // the end, everything else goes before the implicit regs.
  //
  // FIXME: Allow mixed explicit and implicit operands on inline asm.
  // InstrEmitter::EmitSpecialNode() is marking inline asm clobbers as
  // implicit-defs, but they must not be moved around. See the FIXME in
  // InstrEmitter.cpp.
  unsigned OpNo = getNumOperands();
  bool isImpReg = Op.isReg() && Op.isImplicit();
  if (!isImpReg && !isInlineAsm()) {
    while (OpNo && Operands[OpNo-1].isReg() && Operands[OpNo-1].isImplicit()) {
      --OpNo;
      assert(!Operands[OpNo].isTied() && "Cannot move tied operands");
    }
  }

  // OpNo now points to the desired insertion point. Unless this is a variadic
  // instruction, only implicit regs are allowed beyond MCID->getNumOperands().
  // RegMask operands go between the explicit and implicit operands.
  MachineRegisterInfo *MRI = getRegInfo();

  // Determine if the Operands array needs to be reallocated.
  // Save the old capacity and operand array.
  OperandCapacity OldCap = CapOperands;
  MachineOperand *OldOperands = Operands;
  if (!OldOperands || OldCap.getSize() == getNumOperands()) {
    CapOperands = OldOperands ? OldCap.getNext() : OldCap.get(1);
    Operands = MF.allocateOperandArray(CapOperands);
    // Move the operands before the insertion point.
    if (OpNo)
      moveOperands(Operands, OldOperands, OpNo, MRI);
  }

  // Move the operands following the insertion point.
  if (OpNo != NumOperands)
    moveOperands(Operands + OpNo + 1, OldOperands + OpNo, NumOperands - OpNo,
                 MRI);
  ++NumOperands;

  // Deallocate the old operand array.
  if (OldOperands != Operands && OldOperands)
    MF.deallocateOperandArray(OldCap, OldOperands);

  // Copy Op into place. It still needs to be inserted into the MRI use lists.
  MachineOperand *NewMO = new (Operands + OpNo) MachineOperand(Op);
  NewMO->ParentMI = this;

  // When adding a register operand, tell MRI about it.
  if (NewMO->isReg()) {
    // Ensure isOnRegUseList() returns false, regardless of Op's status.
    NewMO->Contents.Reg.Prev = nullptr;
    // Ignore existing ties. This is not a property that can be copied.
    NewMO->TiedTo = 0;
    // Add the new operand to MRI, but only for instructions in an MBB.
    if (MRI)
      MRI->addRegOperandToUseList(NewMO);
    // The MCID operand information isn't accurate until we start adding
    // explicit operands. The implicit operands are added first, then the
    // explicits are inserted before them.
    if (!isImpReg) {
      // Tie uses to defs as indicated in MCInstrDesc.
      if (NewMO->isUse()) {
        int DefIdx = MCID->getOperandConstraint(OpNo, MCOI::TIED_TO);
        if (DefIdx != -1)
          tieOperands(DefIdx, OpNo);
      }
      // If the register operand is flagged as early-clobber, mark the operand
      // as such.
      if (MCID->getOperandConstraint(OpNo, MCOI::EARLY_CLOBBER) != -1)
        NewMO->setIsEarlyClobber(true);
    }
    // Ensure debug instructions set debug flag on register uses.
    if (NewMO->isUse() && isDebugInstr())
      NewMO->setIsDebug();
  }
}
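
// Callers normally reach addOperand() through MachineInstrBuilder rather than
// calling it directly. A minimal illustrative sketch (the opcode
// MyTarget::ADDrr and the registers DstReg/LHSReg/RHSReg are assumed for the
// example):
//
//   MachineInstrBuilder MIB =
//       BuildMI(MBB, InsertPt, DL, TII->get(MyTarget::ADDrr), DstReg)
//           .addReg(LHSReg)
//           .addReg(RHSReg);
//
// Each addReg()/addImm() on the builder funnels into addOperand() above,
// which keeps implicit operands at the end of the list and applies the
// TIED_TO and EARLY_CLOBBER constraints from the MCInstrDesc.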

void MachineInstr::removeOperand(unsigned OpNo) {
  assert(OpNo < getNumOperands() && "Invalid operand number");
  untieRegOperand(OpNo);

#ifndef NDEBUG
  // Moving tied operands would break the ties.
  for (unsigned i = OpNo + 1, e = getNumOperands(); i != e; ++i)
    if (Operands[i].isReg())
      assert(!Operands[i].isTied() && "Cannot move tied operands");
#endif

  MachineRegisterInfo *MRI = getRegInfo();
  if (MRI && Operands[OpNo].isReg())
    MRI->removeRegOperandFromUseList(Operands + OpNo);

  // Don't call the MachineOperand destructor. A lot of this code depends on
  // MachineOperand having a trivial destructor anyway, and adding a call here
  // wouldn't make it 'destructor-correct'.

  if (unsigned N = NumOperands - 1 - OpNo)
    moveOperands(Operands + OpNo, Operands + OpNo + 1, N, MRI);
  --NumOperands;
}

void MachineInstr::setExtraInfo(MachineFunction &MF,
                                ArrayRef<MachineMemOperand *> MMOs,
                                MCSymbol *PreInstrSymbol,
                                MCSymbol *PostInstrSymbol,
                                MDNode *HeapAllocMarker, MDNode *PCSections,
                                uint32_t CFIType, MDNode *MMRAs) {
  bool HasPreInstrSymbol = PreInstrSymbol != nullptr;
  bool HasPostInstrSymbol = PostInstrSymbol != nullptr;
  bool HasHeapAllocMarker = HeapAllocMarker != nullptr;
  bool HasPCSections = PCSections != nullptr;
  bool HasCFIType = CFIType != 0;
  bool HasMMRAs = MMRAs != nullptr;
  int NumPointers = MMOs.size() + HasPreInstrSymbol + HasPostInstrSymbol +
                    HasHeapAllocMarker + HasPCSections + HasCFIType + HasMMRAs;

  // Drop all extra info if there is none.
  if (NumPointers <= 0) {
    Info.clear();
    return;
  }

  // If more than one pointer, then store out of line. Store heap alloc markers
  // out of line because PointerSumType cannot hold more than 4 tag types with
  // 32-bit pointers.
  // FIXME: Maybe we should make the symbols in the extra info mutable?
  else if (NumPointers > 1 || HasMMRAs || HasHeapAllocMarker || HasPCSections ||
           HasCFIType) {
    Info.set<EIIK_OutOfLine>(
        MF.createMIExtraInfo(MMOs, PreInstrSymbol, PostInstrSymbol,
                             HeapAllocMarker, PCSections, CFIType, MMRAs));
    return;
  }

  // Otherwise store the single pointer inline.
  if (HasPreInstrSymbol)
    Info.set<EIIK_PreInstrSymbol>(PreInstrSymbol);
  else if (HasPostInstrSymbol)
    Info.set<EIIK_PostInstrSymbol>(PostInstrSymbol);
  else
    Info.set<EIIK_MMO>(MMOs[0]);
}

void MachineInstr::dropMemRefs(MachineFunction &MF) {
  if (memoperands_empty())
    return;

  setExtraInfo(MF, {}, getPreInstrSymbol(), getPostInstrSymbol(),
               getHeapAllocMarker(), getPCSections(), getCFIType(),
               getMMRAMetadata());
}

void MachineInstr::setMemRefs(MachineFunction &MF,
                              ArrayRef<MachineMemOperand *> MMOs) {
  if (MMOs.empty()) {
    dropMemRefs(MF);
    return;
  }

  setExtraInfo(MF, MMOs, getPreInstrSymbol(), getPostInstrSymbol(),
               getHeapAllocMarker(), getPCSections(), getCFIType(),
               getMMRAMetadata());
}

void MachineInstr::addMemOperand(MachineFunction &MF,
                                 MachineMemOperand *MO) {
  SmallVector<MachineMemOperand *, 2> MMOs;
  MMOs.append(memoperands_begin(), memoperands_end());
  MMOs.push_back(MO);
  setMemRefs(MF, MMOs);
}
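
// Example of attaching a memory operand: a sketch for a load from a fixed
// stack slot FI (the 4-byte size and alignment here are illustrative):
//
//   MachineMemOperand *MMO = MF.getMachineMemOperand(
//       MachinePointerInfo::getFixedStack(MF, FI),
//       MachineMemOperand::MOLoad, 4, Align(4));
//   LoadMI.addMemOperand(MF, MMO);
//
// Passes that widen or split accesses typically rebuild the whole list with
// setMemRefs() instead of appending one operand at a time.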

void MachineInstr::cloneMemRefs(MachineFunction &MF, const MachineInstr &MI) {
  if (this == &MI)
    // Nothing to do for a self-clone!
    return;

  assert(&MF == MI.getMF() &&
         "Invalid machine functions when cloning memory references!");
  // See if we can just steal the extra info already allocated for the
  // instruction. We can do this whenever the pre- and post-instruction symbols
  // are the same (including null).
  if (getPreInstrSymbol() == MI.getPreInstrSymbol() &&
      getPostInstrSymbol() == MI.getPostInstrSymbol() &&
      getHeapAllocMarker() == MI.getHeapAllocMarker() &&
      getPCSections() == MI.getPCSections() &&
      getMMRAMetadata() == MI.getMMRAMetadata()) {
    Info = MI.Info;
    return;
  }

  // Otherwise, fall back on a copy-based clone.
  setMemRefs(MF, MI.memoperands());
}

/// Check to see if the MMOs pointed to by the two MemRefs arrays are
/// identical.
static bool hasIdenticalMMOs(ArrayRef<MachineMemOperand *> LHS,
                             ArrayRef<MachineMemOperand *> RHS) {
  if (LHS.size() != RHS.size())
    return false;

  auto LHSPointees = make_pointee_range(LHS);
  auto RHSPointees = make_pointee_range(RHS);
  return std::equal(LHSPointees.begin(), LHSPointees.end(),
                    RHSPointees.begin());
}

void MachineInstr::cloneMergedMemRefs(MachineFunction &MF,
                                      ArrayRef<const MachineInstr *> MIs) {
  // Try handling easy numbers of MIs with simpler mechanisms.
  if (MIs.empty()) {
    dropMemRefs(MF);
    return;
  }
  if (MIs.size() == 1) {
    cloneMemRefs(MF, *MIs[0]);
    return;
  }
  // Because an empty memoperands list provides *no* information and must be
  // handled conservatively (assuming the instruction can do anything), the only
  // way to merge with it is to drop all other memoperands.
  if (MIs[0]->memoperands_empty()) {
    dropMemRefs(MF);
    return;
  }

  // Handle the general case.
  SmallVector<MachineMemOperand *, 2> MergedMMOs;
  // Start with the first instruction.
  assert(&MF == MIs[0]->getMF() &&
         "Invalid machine functions when cloning memory references!");
  MergedMMOs.append(MIs[0]->memoperands_begin(), MIs[0]->memoperands_end());
  // Now walk all the other instructions and accumulate any different MMOs.
  for (const MachineInstr &MI : make_pointee_range(MIs.slice(1))) {
    assert(&MF == MI.getMF() &&
           "Invalid machine functions when cloning memory references!");

    // Skip MIs with identical operands to the first. This is a somewhat
    // arbitrary hack but will catch common cases without being quadratic.
    // TODO: We could fully implement merge semantics here if needed.
    if (hasIdenticalMMOs(MIs[0]->memoperands(), MI.memoperands()))
      continue;

    // Because an empty memoperands list provides *no* information and must be
    // handled conservatively (assuming the instruction can do anything), the
    // only way to merge with it is to drop all other memoperands.
    if (MI.memoperands_empty()) {
      dropMemRefs(MF);
      return;
    }

    // Otherwise accumulate these into our temporary buffer of the merged state.
    MergedMMOs.append(MI.memoperands_begin(), MI.memoperands_end());
  }

  setMemRefs(MF, MergedMMOs);
}

void MachineInstr::setPreInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
  // Do nothing if old and new symbols are the same.
  if (Symbol == getPreInstrSymbol())
    return;

  // If there was only one symbol and we're removing it, just clear info.
  if (!Symbol && Info.is<EIIK_PreInstrSymbol>()) {
    Info.clear();
    return;
  }

  setExtraInfo(MF, memoperands(), Symbol, getPostInstrSymbol(),
               getHeapAllocMarker(), getPCSections(), getCFIType(),
               getMMRAMetadata());
}

void MachineInstr::setPostInstrSymbol(MachineFunction &MF, MCSymbol *Symbol) {
  // Do nothing if old and new symbols are the same.
  if (Symbol == getPostInstrSymbol())
    return;

  // If there was only one symbol and we're removing it, just clear info.
  if (!Symbol && Info.is<EIIK_PostInstrSymbol>()) {
    Info.clear();
    return;
  }

  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), Symbol,
               getHeapAllocMarker(), getPCSections(), getCFIType(),
               getMMRAMetadata());
}

void MachineInstr::setHeapAllocMarker(MachineFunction &MF, MDNode *Marker) {
  // Do nothing if old and new markers are the same.
  if (Marker == getHeapAllocMarker())
    return;

  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
               Marker, getPCSections(), getCFIType(), getMMRAMetadata());
}

void MachineInstr::setPCSections(MachineFunction &MF, MDNode *PCSections) {
  // Do nothing if old and new metadata are the same.
  if (PCSections == getPCSections())
    return;

  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
               getHeapAllocMarker(), PCSections, getCFIType(),
               getMMRAMetadata());
}

void MachineInstr::setCFIType(MachineFunction &MF, uint32_t Type) {
  // Do nothing if old and new types are the same.
  if (Type == getCFIType())
    return;

  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
               getHeapAllocMarker(), getPCSections(), Type, getMMRAMetadata());
}

void MachineInstr::setMMRAMetadata(MachineFunction &MF, MDNode *MMRAs) {
  // Do nothing if old and new metadata are the same.
  if (MMRAs == getMMRAMetadata())
    return;

  setExtraInfo(MF, memoperands(), getPreInstrSymbol(), getPostInstrSymbol(),
               getHeapAllocMarker(), getPCSections(), getCFIType(), MMRAs);
}

void MachineInstr::cloneInstrSymbols(MachineFunction &MF,
                                     const MachineInstr &MI) {
  if (this == &MI)
    // Nothing to do for a self-clone!
    return;

  assert(&MF == MI.getMF() &&
         "Invalid machine functions when cloning instruction symbols!");

  setPreInstrSymbol(MF, MI.getPreInstrSymbol());
  setPostInstrSymbol(MF, MI.getPostInstrSymbol());
  setHeapAllocMarker(MF, MI.getHeapAllocMarker());
  setPCSections(MF, MI.getPCSections());
  setMMRAMetadata(MF, MI.getMMRAMetadata());
}

uint32_t MachineInstr::mergeFlagsWith(const MachineInstr &Other) const {
  // For now, just return the union of the flags. If the flags get more
  // complicated over time, we might need more logic here.
  return getFlags() | Other.getFlags();
}

uint32_t MachineInstr::copyFlagsFromInstruction(const Instruction &I) {
  uint32_t MIFlags = 0;
  // Copy the wrapping flags.
  if (const OverflowingBinaryOperator *OB =
          dyn_cast<OverflowingBinaryOperator>(&I)) {
    if (OB->hasNoSignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoSWrap;
    if (OB->hasNoUnsignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoUWrap;
  } else if (const TruncInst *TI = dyn_cast<TruncInst>(&I)) {
    if (TI->hasNoSignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoSWrap;
    if (TI->hasNoUnsignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoUWrap;
  } else if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
    if (GEP->hasNoUnsignedSignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoUSWrap;
    if (GEP->hasNoUnsignedWrap())
      MIFlags |= MachineInstr::MIFlag::NoUWrap;
  }

  // Copy the nonneg flag.
  if (const PossiblyNonNegInst *PNI = dyn_cast<PossiblyNonNegInst>(&I)) {
    if (PNI->hasNonNeg())
      MIFlags |= MachineInstr::MIFlag::NonNeg;
    // Copy the disjoint flag.
  } else if (const PossiblyDisjointInst *PD =
                 dyn_cast<PossiblyDisjointInst>(&I)) {
    if (PD->isDisjoint())
      MIFlags |= MachineInstr::MIFlag::Disjoint;
  }

  // Copy the samesign flag.
  if (const ICmpInst *ICmp = dyn_cast<ICmpInst>(&I))
    if (ICmp->hasSameSign())
      MIFlags |= MachineInstr::MIFlag::SameSign;

  // Copy the exact flag.
  if (const PossiblyExactOperator *PE = dyn_cast<PossiblyExactOperator>(&I))
    if (PE->isExact())
      MIFlags |= MachineInstr::MIFlag::IsExact;

  // Copy the fast-math flags.
  if (const FPMathOperator *FP = dyn_cast<FPMathOperator>(&I)) {
    const FastMathFlags Flags = FP->getFastMathFlags();
    if (Flags.noNaNs())
      MIFlags |= MachineInstr::MIFlag::FmNoNans;
    if (Flags.noInfs())
      MIFlags |= MachineInstr::MIFlag::FmNoInfs;
    if (Flags.noSignedZeros())
      MIFlags |= MachineInstr::MIFlag::FmNsz;
    if (Flags.allowReciprocal())
      MIFlags |= MachineInstr::MIFlag::FmArcp;
    if (Flags.allowContract())
      MIFlags |= MachineInstr::MIFlag::FmContract;
    if (Flags.approxFunc())
      MIFlags |= MachineInstr::MIFlag::FmAfn;
    if (Flags.allowReassoc())
      MIFlags |= MachineInstr::MIFlag::FmReassoc;
  }

  if (I.getMetadata(LLVMContext::MD_unpredictable))
    MIFlags |= MachineInstr::MIFlag::Unpredictable;

  return MIFlags;
}

void MachineInstr::copyIRFlags(const Instruction &I) {
  Flags = copyFlagsFromInstruction(I);
}
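
// Example: when instruction selection lowers `%s = add nsw i32 %a, %b`,
// calling copyIRFlags(*AddInst) on the resulting machine add sets NoSWrap,
// which later passes can query (sketch):
//
//   if (MI.getFlag(MachineInstr::NoSWrap)) {
//     // Signed overflow would be poison; reassociation is allowed.
//   }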

bool MachineInstr::hasPropertyInBundle(uint64_t Mask, QueryType Type) const {
  assert(!isBundledWithPred() && "Must be called on bundle header");
  for (MachineBasicBlock::const_instr_iterator MII = getIterator();; ++MII) {
    if (MII->getDesc().getFlags() & Mask) {
      if (Type == AnyInBundle)
        return true;
    } else {
      if (Type == AllInBundle && !MII->isBundle())
        return false;
    }
    // This was the last instruction in the bundle.
    if (!MII->isBundledWithSucc())
      return Type == AllInBundle;
  }
}

bool MachineInstr::isIdenticalTo(const MachineInstr &Other,
                                 MICheckType Check) const {
  // If opcodes or number of operands are not the same then the two
  // instructions are obviously not identical.
  if (Other.getOpcode() != getOpcode() ||
      Other.getNumOperands() != getNumOperands())
    return false;

  if (isBundle()) {
    // We have passed the test above that both instructions have the same
    // opcode, so we know that both instructions are bundles here. Let's compare
    // MIs inside the bundle.
    assert(Other.isBundle() && "Expected that both instructions are bundles.");
    MachineBasicBlock::const_instr_iterator I1 = getIterator();
    MachineBasicBlock::const_instr_iterator I2 = Other.getIterator();
    // Loop until we have analysed the last instruction inside at least one of
    // the bundles.
    while (I1->isBundledWithSucc() && I2->isBundledWithSucc()) {
      ++I1;
      ++I2;
      if (!I1->isIdenticalTo(*I2, Check))
        return false;
    }
    // If we've reached the end of just one of the two bundles, but not both,
    // the instructions are not identical.
    if (I1->isBundledWithSucc() || I2->isBundledWithSucc())
      return false;
  }

  // Check operands to make sure they match.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    const MachineOperand &OMO = Other.getOperand(i);
    if (!MO.isReg()) {
      if (!MO.isIdenticalTo(OMO))
        return false;
      continue;
    }

    // Clients may or may not want to ignore defs when testing for equality.
    // For example, machine CSE pass only cares about finding common
    // subexpressions, so it's safe to ignore virtual register defs.
    if (MO.isDef()) {
      if (Check == IgnoreDefs)
        continue;
      else if (Check == IgnoreVRegDefs) {
        if (!MO.getReg().isVirtual() || !OMO.getReg().isVirtual())
          if (!MO.isIdenticalTo(OMO))
            return false;
      } else {
        if (!MO.isIdenticalTo(OMO))
          return false;
        if (Check == CheckKillDead && MO.isDead() != OMO.isDead())
          return false;
      }
    } else {
      if (!MO.isIdenticalTo(OMO))
        return false;
      if (Check == CheckKillDead && MO.isKill() != OMO.isKill())
        return false;
    }
  }
  // If DebugLoc does not match then two debug instructions are not identical.
  if (isDebugInstr())
    if (getDebugLoc() && Other.getDebugLoc() &&
        getDebugLoc() != Other.getDebugLoc())
      return false;
  // If pre- or post-instruction symbols do not match then the two instructions
  // are not identical.
  if (getPreInstrSymbol() != Other.getPreInstrSymbol() ||
      getPostInstrSymbol() != Other.getPostInstrSymbol())
    return false;
  // Call instructions with different CFI types are not identical.
  if (isCall() && getCFIType() != Other.getCFIType())
    return false;

  return true;
}

bool MachineInstr::isEquivalentDbgInstr(const MachineInstr &Other) const {
  if (!isDebugValueLike() || !Other.isDebugValueLike())
    return false;
  if (getDebugLoc() != Other.getDebugLoc())
    return false;
  if (getDebugVariable() != Other.getDebugVariable())
    return false;
  if (getNumDebugOperands() != Other.getNumDebugOperands())
    return false;
  for (unsigned OpIdx = 0; OpIdx < getNumDebugOperands(); ++OpIdx)
    if (!getDebugOperand(OpIdx).isIdenticalTo(Other.getDebugOperand(OpIdx)))
      return false;
  if (!DIExpression::isEqualExpression(
          getDebugExpression(), isIndirectDebugValue(),
          Other.getDebugExpression(), Other.isIndirectDebugValue()))
    return false;
  return true;
}

const MachineFunction *MachineInstr::getMF() const {
  return getParent()->getParent();
}

MachineInstr *MachineInstr::removeFromParent() {
  assert(getParent() && "Not embedded in a basic block!");
  return getParent()->remove(this);
}

MachineInstr *MachineInstr::removeFromBundle() {
  assert(getParent() && "Not embedded in a basic block!");
  return getParent()->remove_instr(this);
}

void MachineInstr::eraseFromParent() {
  assert(getParent() && "Not embedded in a basic block!");
  getParent()->erase(this);
}

void MachineInstr::eraseFromBundle() {
  assert(getParent() && "Not embedded in a basic block!");
  getParent()->erase_instr(this);
}

bool MachineInstr::isCandidateForAdditionalCallInfo(QueryType Type) const {
  if (!isCall(Type))
    return false;
  switch (getOpcode()) {
  case TargetOpcode::PATCHPOINT:
  case TargetOpcode::STACKMAP:
  case TargetOpcode::STATEPOINT:
  case TargetOpcode::FENTRY_CALL:
    return false;
  }
  return true;
}

bool MachineInstr::shouldUpdateAdditionalCallInfo() const {
  if (isBundle())
    return isCandidateForAdditionalCallInfo(MachineInstr::AnyInBundle);
  return isCandidateForAdditionalCallInfo();
}

unsigned MachineInstr::getNumExplicitOperands() const {
  unsigned NumOperands = MCID->getNumOperands();
  if (!MCID->isVariadic())
    return NumOperands;

  for (unsigned I = NumOperands, E = getNumOperands(); I != E; ++I) {
    const MachineOperand &MO = getOperand(I);
    // The operands must always be in the following order:
    // - explicit reg defs,
    // - other explicit operands (reg uses, immediates, etc.),
    // - implicit reg defs
    // - implicit reg uses
    if (MO.isReg() && MO.isImplicit())
      break;
    ++NumOperands;
  }
  return NumOperands;
}
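
// For example, a variadic call rendered in MIR as
//   CALL64 @callee, $rdi, implicit $rsp, implicit-def $rax
// (the opcode and registers are illustrative) has two explicit operands, the
// callee and $rdi; the scan above stops at the first implicit register
// operand.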

unsigned MachineInstr::getNumExplicitDefs() const {
  unsigned NumDefs = MCID->getNumDefs();
  if (!MCID->isVariadic())
    return NumDefs;

  for (unsigned I = NumDefs, E = getNumOperands(); I != E; ++I) {
    const MachineOperand &MO = getOperand(I);
    if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
      break;
    ++NumDefs;
  }
  return NumDefs;
}

void MachineInstr::bundleWithPred() {
  assert(!isBundledWithPred() && "MI is already bundled with its predecessor");
  setFlag(BundledPred);
  MachineBasicBlock::instr_iterator Pred = getIterator();
  --Pred;
  assert(!Pred->isBundledWithSucc() && "Inconsistent bundle flags");
  Pred->setFlag(BundledSucc);
}

void MachineInstr::bundleWithSucc() {
  assert(!isBundledWithSucc() && "MI is already bundled with its successor");
  setFlag(BundledSucc);
  MachineBasicBlock::instr_iterator Succ = getIterator();
  ++Succ;
  assert(!Succ->isBundledWithPred() && "Inconsistent bundle flags");
  Succ->setFlag(BundledPred);
}

void MachineInstr::unbundleFromPred() {
  assert(isBundledWithPred() && "MI isn't bundled with its predecessor");
  clearFlag(BundledPred);
  MachineBasicBlock::instr_iterator Pred = getIterator();
  --Pred;
  assert(Pred->isBundledWithSucc() && "Inconsistent bundle flags");
  Pred->clearFlag(BundledSucc);
}

void MachineInstr::unbundleFromSucc() {
  assert(isBundledWithSucc() && "MI isn't bundled with its successor");
  clearFlag(BundledSucc);
  MachineBasicBlock::instr_iterator Succ = getIterator();
  ++Succ;
  assert(Succ->isBundledWithPred() && "Inconsistent bundle flags");
  Succ->clearFlag(BundledPred);
}
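
// These four helpers maintain the bundling invariant: a bundle is a maximal
// chain of instructions linked by matching BundledSucc/BundledPred flags.
// Conceptual picture of a three-instruction bundle:
//
//   BUNDLE ...   // header: isBundledWithSucc() only
//   * MI0        // isBundledWithPred() && isBundledWithSucc()
//   * MI1        // isBundledWithPred() only: last instruction in the bundle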

bool MachineInstr::isStackAligningInlineAsm() const {
  if (isInlineAsm()) {
    unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
    if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
      return true;
  }
  return false;
}

InlineAsm::AsmDialect MachineInstr::getInlineAsmDialect() const {
  assert(isInlineAsm() && "getInlineAsmDialect() only works for inline asms!");
  unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
  return InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect) != 0);
}

int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx,
                                       unsigned *GroupNo) const {
  assert(isInlineAsm() && "Expected an inline asm instruction");
  assert(OpIdx < getNumOperands() && "OpIdx out of range");

  // Ignore queries about the initial operands.
  if (OpIdx < InlineAsm::MIOp_FirstOperand)
    return -1;

  unsigned Group = 0;
  unsigned NumOps;
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    // If we reach the implicit register operands, stop looking.
    if (!FlagMO.isImm())
      return -1;
    const InlineAsm::Flag F(FlagMO.getImm());
    NumOps = 1 + F.getNumOperandRegisters();
    if (i + NumOps > OpIdx) {
      if (GroupNo)
        *GroupNo = Group;
      return i;
    }
    ++Group;
  }
  return -1;
}
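
// Sketch of the INLINEASM operand layout the loop above walks (the registers
// are illustrative):
//
//   0: external symbol (the asm string)
//   1: ExtraInfo immediate
//   2: flag word: regdef, 1 register    <- index returned for OpIdx 3
//   3: $r0 (def)
//   4: flag word: reguse, 1 register    <- index returned for OpIdx 5
//   5: $r1 (use)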

const DILabel *MachineInstr::getDebugLabel() const {
  assert(isDebugLabel() && "not a DBG_LABEL");
  return cast<DILabel>(getOperand(0).getMetadata());
}

const MachineOperand &MachineInstr::getDebugVariableOp() const {
  assert((isDebugValueLike()) && "not a DBG_VALUE*");
  unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
  return getOperand(VariableOp);
}

MachineOperand &MachineInstr::getDebugVariableOp() {
  assert((isDebugValueLike()) && "not a DBG_VALUE*");
  unsigned VariableOp = isNonListDebugValue() ? 2 : 0;
  return getOperand(VariableOp);
}

const DILocalVariable *MachineInstr::getDebugVariable() const {
  return cast<DILocalVariable>(getDebugVariableOp().getMetadata());
}

const MachineOperand &MachineInstr::getDebugExpressionOp() const {
  assert((isDebugValueLike()) && "not a DBG_VALUE*");
  unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
  return getOperand(ExpressionOp);
}

MachineOperand &MachineInstr::getDebugExpressionOp() {
  assert((isDebugValueLike()) && "not a DBG_VALUE*");
  unsigned ExpressionOp = isNonListDebugValue() ? 3 : 1;
  return getOperand(ExpressionOp);
}

const DIExpression *MachineInstr::getDebugExpression() const {
  return cast<DIExpression>(getDebugExpressionOp().getMetadata());
}

bool MachineInstr::isDebugEntryValue() const {
  return isDebugValue() && getDebugExpression()->isEntryValue();
}

const TargetRegisterClass*
MachineInstr::getRegClassConstraint(unsigned OpIdx,
                                    const TargetInstrInfo *TII,
                                    const TargetRegisterInfo *TRI) const {
  assert(getParent() && "Can't have an MBB reference here!");
  assert(getMF() && "Can't have an MF reference here!");
  const MachineFunction &MF = *getMF();

  // Most opcodes have fixed constraints in their MCInstrDesc.
  if (!isInlineAsm())
    return TII->getRegClass(getDesc(), OpIdx, TRI, MF);

  if (!getOperand(OpIdx).isReg())
    return nullptr;

  // For tied uses on inline asm, get the constraint from the def.
  unsigned DefIdx;
  if (getOperand(OpIdx).isUse() && isRegTiedToDefOperand(OpIdx, &DefIdx))
    OpIdx = DefIdx;

  // Inline asm stores register class constraints in the flag word.
  int FlagIdx = findInlineAsmFlagIdx(OpIdx);
  if (FlagIdx < 0)
    return nullptr;

  const InlineAsm::Flag F(getOperand(FlagIdx).getImm());
  unsigned RCID;
  if ((F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind()) &&
      F.hasRegClassConstraint(RCID))
    return TRI->getRegClass(RCID);

  // Assume that all registers in a memory operand are pointers.
  if (F.isMemKind())
    return TRI->getPointerRegClass(MF);

  return nullptr;
}

const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVReg(
    Register Reg, const TargetRegisterClass *CurRC, const TargetInstrInfo *TII,
    const TargetRegisterInfo *TRI, bool ExploreBundle) const {
  // Check every operand inside the bundle if we have been asked to.
  if (ExploreBundle)
    for (ConstMIBundleOperands OpndIt(*this); OpndIt.isValid() && CurRC;
         ++OpndIt)
      CurRC = OpndIt->getParent()->getRegClassConstraintEffectForVRegImpl(
          OpndIt.getOperandNo(), Reg, CurRC, TII, TRI);
  else
    // Otherwise, just check the current operands.
    for (unsigned i = 0, e = NumOperands; i < e && CurRC; ++i)
      CurRC = getRegClassConstraintEffectForVRegImpl(i, Reg, CurRC, TII, TRI);
  return CurRC;
}

const TargetRegisterClass *MachineInstr::getRegClassConstraintEffectForVRegImpl(
    unsigned OpIdx, Register Reg, const TargetRegisterClass *CurRC,
    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
  assert(CurRC && "Invalid initial register class");
  // Check if Reg is constrained by some of its use/def from MI.
  const MachineOperand &MO = getOperand(OpIdx);
  if (!MO.isReg() || MO.getReg() != Reg)
    return CurRC;
  // If yes, accumulate the constraints through the operand.
  return getRegClassConstraintEffect(OpIdx, CurRC, TII, TRI);
}

const TargetRegisterClass *MachineInstr::getRegClassConstraintEffect(
    unsigned OpIdx, const TargetRegisterClass *CurRC,
    const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const {
  const TargetRegisterClass *OpRC = getRegClassConstraint(OpIdx, TII, TRI);
  const MachineOperand &MO = getOperand(OpIdx);
  assert(MO.isReg() &&
         "Cannot get register constraints for non-register operand");
  assert(CurRC && "Invalid initial register class");
  if (unsigned SubIdx = MO.getSubReg()) {
    if (OpRC)
      CurRC = TRI->getMatchingSuperRegClass(CurRC, OpRC, SubIdx);
    else
      CurRC = TRI->getSubClassWithSubReg(CurRC, SubIdx);
  } else if (OpRC)
    CurRC = TRI->getCommonSubClass(CurRC, OpRC);
  return CurRC;
}

/// Return the number of instructions inside the MI bundle, not counting the
/// header instruction.
unsigned MachineInstr::getBundleSize() const {
  MachineBasicBlock::const_instr_iterator I = getIterator();
  unsigned Size = 0;
  while (I->isBundledWithSucc()) {
    ++Size;
    ++I;
  }
  return Size;
}

/// Returns true if the MachineInstr has an implicit-use operand of exactly
/// the given register (not considering sub/super-registers).
bool MachineInstr::hasRegisterImplicitUseOperand(Register Reg) const {
  for (const MachineOperand &MO : implicit_operands()) {
    if (MO.isReg() && MO.isUse() && MO.getReg() == Reg)
      return true;
  }
  return false;
}

/// findRegisterUseOperandIdx() - Returns the index of the operand that is a
/// use of the specified register, or -1 if it is not found. It further
/// tightens the search criteria to a use that kills the register if isKill
/// is true.
int MachineInstr::findRegisterUseOperandIdx(Register Reg,
                                            const TargetRegisterInfo *TRI,
                                            bool isKill) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    Register MOReg = MO.getReg();
    if (!MOReg)
      continue;
    if (MOReg == Reg || (TRI && Reg && MOReg && TRI->regsOverlap(MOReg, Reg)))
      if (!isKill || MO.isKill())
        return i;
  }
  return -1;
}

/// readsWritesVirtualRegister - Return a pair of bools (reads, writes)
/// indicating if this instruction reads or writes Reg. This also considers
/// partial defines.
std::pair<bool,bool>
MachineInstr::readsWritesVirtualRegister(Register Reg,
                                         SmallVectorImpl<unsigned> *Ops) const {
  bool PartDef = false; // Partial redefine.
  bool FullDef = false; // Full define.
  bool Use = false;

  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    if (!MO.isReg() || MO.getReg() != Reg)
      continue;
    if (Ops)
      Ops->push_back(i);
    if (MO.isUse())
      Use |= !MO.isUndef();
    else if (MO.getSubReg() && !MO.isUndef())
      // A partial def undef doesn't count as reading the register.
      PartDef = true;
    else
      FullDef = true;
  }
  // A partial redefine uses Reg unless there is also a full define.
  return std::make_pair(Use || (PartDef && !FullDef), PartDef || FullDef);
}
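
// Example: for a subregister write such as `%0.sub_lo = COPY %1` (with no
// read-undef flag), this returns {true, true} for %0: the partial redefine
// writes part of %0 and implicitly reads the rest. A typical call site
// (sketch):
//
//   auto [Reads, Writes] = MI.readsWritesVirtualRegister(Reg);
//   if (Writes && !Reads)
//     ;  // Reg is fully clobbered; no earlier value is live through MI.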

/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
/// the specified register or -1 if it is not found. If isDead is true, defs
/// that are not dead are skipped. If TargetRegisterInfo is non-null, then it
/// also checks if there is a def of a super-register.
int MachineInstr::findRegisterDefOperandIdx(Register Reg,
                                            const TargetRegisterInfo *TRI,
                                            bool isDead, bool Overlap) const {
  bool isPhys = Reg.isPhysical();
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = getOperand(i);
    // Accept regmask operands when Overlap is set.
    // Ignore them when looking for a specific def operand (Overlap == false).
    if (isPhys && Overlap && MO.isRegMask() && MO.clobbersPhysReg(Reg))
      return i;
    if (!MO.isReg() || !MO.isDef())
      continue;
    Register MOReg = MO.getReg();
    bool Found = (MOReg == Reg);
    if (!Found && TRI && isPhys && MOReg.isPhysical()) {
      if (Overlap)
        Found = TRI->regsOverlap(MOReg, Reg);
      else
        Found = TRI->isSubRegister(MOReg, Reg);
    }
    if (Found && (!isDead || MO.isDead()))
      return i;
  }
  return -1;
}

/// findFirstPredOperandIdx() - Find the index of the first operand in the
/// operand list that is used to represent the predicate. It returns -1 if
/// none is found.
int MachineInstr::findFirstPredOperandIdx() const {
  // Don't call MCID.findFirstPredOperandIdx() because this variant
  // is sometimes called on an instruction that's not yet complete, and
  // so the number of operands is less than the MCID indicates. In
  // particular, the PTX target does this.
  const MCInstrDesc &MCID = getDesc();
  if (MCID.isPredicable()) {
    for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
      if (MCID.operands()[i].isPredicate())
        return i;
  }

  return -1;
}

// MachineOperand::TiedTo is 4 bits wide.
const unsigned TiedMax = 15;

/// tieOperands - Mark operands at DefIdx and UseIdx as tied to each other.
///
/// Use and def operands can be tied together, indicated by a non-zero TiedTo
/// field. TiedTo can have these values:
///
/// 0:              Operand is not tied to anything.
/// 1 to TiedMax-1: Tied to getOperand(TiedTo-1).
/// TiedMax:        Tied to an operand >= TiedMax-1.
///
/// The tied def must be one of the first TiedMax operands on a normal
/// instruction. INLINEASM instructions allow more tied defs.
///
void MachineInstr::tieOperands(unsigned DefIdx, unsigned UseIdx) {
  MachineOperand &DefMO = getOperand(DefIdx);
  MachineOperand &UseMO = getOperand(UseIdx);
  assert(DefMO.isDef() && "DefIdx must be a def operand");
  assert(UseMO.isUse() && "UseIdx must be a use operand");
  assert(!DefMO.isTied() && "Def is already tied to another use");
  assert(!UseMO.isTied() && "Use is already tied to another def");

  if (DefIdx < TiedMax)
    UseMO.TiedTo = DefIdx + 1;
  else {
    // Inline asm can use the group descriptors to find tied operands,
    // statepoint tied operands are trivial to match (1-1 reg def with reg use),
    // but on normal instruction, the tied def must be within the first TiedMax
    // operands.
    assert((isInlineAsm() || getOpcode() == TargetOpcode::STATEPOINT) &&
           "DefIdx out of range");
    UseMO.TiedTo = TiedMax;
  }

  // UseIdx can be out of range, we'll search for it in findTiedOperandIdx().
  DefMO.TiedTo = std::min(UseIdx + 1, TiedMax);
}
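
// Example encoding: on a two-address instruction where def operand 0 is tied
// to use operand 2, tieOperands(0, 2) stores TiedTo = 1 on operand 2
// (pointing at operand 0) and TiedTo = 3 on operand 0 (pointing at operand
// 2), so both ends round-trip:
//
//   assert(MI.findTiedOperandIdx(0) == 2 && MI.findTiedOperandIdx(2) == 0);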

/// Given the index of a tied register operand, find the operand it is tied to.
/// Defs are tied to uses and vice versa. Returns the index of the tied operand
/// which must exist.
unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const {
  const MachineOperand &MO = getOperand(OpIdx);
  assert(MO.isTied() && "Operand isn't tied");

  // Normally TiedTo is in range.
  if (MO.TiedTo < TiedMax)
    return MO.TiedTo - 1;

  // Uses on normal instructions can be out of range.
  if (!isInlineAsm() && getOpcode() != TargetOpcode::STATEPOINT) {
    // Normal tied defs must be in the 0..TiedMax-1 range.
    if (MO.isUse())
      return TiedMax - 1;
    // MO is a def. Search for the tied use.
    for (unsigned i = TiedMax - 1, e = getNumOperands(); i != e; ++i) {
      const MachineOperand &UseMO = getOperand(i);
      if (UseMO.isReg() && UseMO.isUse() && UseMO.TiedTo == OpIdx + 1)
        return i;
    }
    llvm_unreachable("Can't find tied use");
  }

  if (getOpcode() == TargetOpcode::STATEPOINT) {
    // In STATEPOINT defs correspond 1-1 to GC pointer operands passed
    // on registers.
    StatepointOpers SO(this);
    unsigned CurUseIdx = SO.getFirstGCPtrIdx();
    assert(CurUseIdx != -1U && "only gc pointer statepoint operands can be tied");
    unsigned NumDefs = getNumDefs();
    for (unsigned CurDefIdx = 0; CurDefIdx < NumDefs; ++CurDefIdx) {
      while (!getOperand(CurUseIdx).isReg())
        CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
      if (OpIdx == CurDefIdx)
        return CurUseIdx;
      if (OpIdx == CurUseIdx)
        return CurDefIdx;
      CurUseIdx = StackMaps::getNextMetaArgIdx(this, CurUseIdx);
    }
    llvm_unreachable("Can't find tied use");
  }

  // Now deal with inline asm by parsing the operand group descriptor flags.
  // Find the beginning of each operand group.
  SmallVector<unsigned, 8> GroupIdx;
  unsigned OpIdxGroup = ~0u;
  unsigned NumOps;
  for (unsigned i = InlineAsm::MIOp_FirstOperand, e = getNumOperands(); i < e;
       i += NumOps) {
    const MachineOperand &FlagMO = getOperand(i);
    assert(FlagMO.isImm() && "Invalid tied operand on inline asm");
    unsigned CurGroup = GroupIdx.size();
    GroupIdx.push_back(i);
    const InlineAsm::Flag F(FlagMO.getImm());
    NumOps = 1 + F.getNumOperandRegisters();
    // OpIdx belongs to this operand group.
    if (OpIdx > i && OpIdx < i + NumOps)
      OpIdxGroup = CurGroup;
    unsigned TiedGroup;
    if (!F.isUseOperandTiedToDef(TiedGroup))
      continue;
    // Operands in this group are tied to operands in TiedGroup which must be
    // earlier. Find the number of operands between the two groups.
    unsigned Delta = i - GroupIdx[TiedGroup];

    // OpIdx is a use tied to TiedGroup.
    if (OpIdxGroup == CurGroup)
      return OpIdx - Delta;

    // OpIdx is a def tied to this use group.
    if (OpIdxGroup == TiedGroup)
      return OpIdx + Delta;
  }
  llvm_unreachable("Invalid tied operand on inline asm");
}

/// clearKillInfo - Clears kill flags on all operands.
///
void MachineInstr::clearKillInfo() {
  for (MachineOperand &MO : operands()) {
    if (MO.isReg() && MO.isUse())
      MO.setIsKill(false);
  }
}

void MachineInstr::substituteRegister(Register FromReg, Register ToReg,
                                      unsigned SubIdx,
                                      const TargetRegisterInfo &RegInfo) {
  if (ToReg.isPhysical()) {
    if (SubIdx)
      ToReg = RegInfo.getSubReg(ToReg, SubIdx);
    for (MachineOperand &MO : operands()) {
      if (!MO.isReg() || MO.getReg() != FromReg)
        continue;
      MO.substPhysReg(ToReg, RegInfo);
    }
  } else {
    for (MachineOperand &MO : operands()) {
      if (!MO.isReg() || MO.getReg() != FromReg)
        continue;
      MO.substVirtReg(ToReg, SubIdx, RegInfo);
    }
  }
}

/// isSafeToMove - Return true if it is safe to move this instruction. If
/// SawStore is set to true, it means that there is a store (or call) between
/// the instruction's location and its intended destination.
bool MachineInstr::isSafeToMove(bool &SawStore) const {
  // Ignore stuff that we obviously can't move.
  //
  // Treat volatile loads as stores. This is not strictly necessary for
  // volatiles, but it is required for atomic loads. It is not allowed to move
  // a load across an atomic load with Ordering > Monotonic.
  if (mayStore() || isCall() || isPHI() ||
      (mayLoad() && hasOrderedMemoryRef())) {
    SawStore = true;
    return false;
  }

  if (isPosition() || isDebugInstr() || isTerminator() ||
      mayRaiseFPException() || hasUnmodeledSideEffects() ||
      isJumpTableDebugInfo())
    return false;

  // See if this instruction does a load. If so, we have to guarantee that the
  // loaded value doesn't change between the load and its intended destination.
  // The check for isInvariantLoad gives the target the chance to classify the
  // load as always returning a constant, e.g. a constant pool load.
  if (mayLoad() && !isDereferenceableInvariantLoad())
    // Otherwise, this is a real load. If there is a store between the load and
    // end of block, we can't move it.
    return !SawStore;

  return true;
}
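
// Typical use: callers scan a range of instructions, threading one SawStore
// flag through the walk so that any store seen earlier pins later loads
// (sketch; Start/End and the trySink helper are assumed):
//
//   bool SawStore = false;
//   for (MachineInstr &MI : llvm::make_range(Start, End))
//     if (MI.isSafeToMove(SawStore))
//       trySink(MI);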

bool MachineInstr::wouldBeTriviallyDead() const {
  // Don't delete frame allocation labels.
  // FIXME: Why is LOCAL_ESCAPE not considered in MachineInstr::isLabel?
  if (getOpcode() == TargetOpcode::LOCAL_ESCAPE)
    return false;

  // Don't delete FAKE_USE.
  // FIXME: Why is FAKE_USE not considered in MachineInstr::isPosition?
  if (isFakeUse())
    return false;

  // LIFETIME markers should be preserved.
  // FIXME: Why are LIFETIME markers not considered in MachineInstr::isPosition?
  if (isLifetimeMarker())
    return false;

  // If we can move an instruction, we can remove it. Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  return isPHI() || isSafeToMove(SawStore);
}

bool MachineInstr::isDead(const MachineRegisterInfo &MRI,
                          LiveRegUnits *LivePhysRegs) const {
  // Instructions without side-effects are dead iff they only define dead regs.
  // This function is hot and this loop returns early in the common case,
  // so only perform additional checks before this if absolutely necessary.
  for (const MachineOperand &MO : all_defs()) {
    Register Reg = MO.getReg();
    if (Reg.isPhysical()) {
      // Don't delete live physreg defs, or any reserved register defs.
      if (!LivePhysRegs || !LivePhysRegs->available(Reg) || MRI.isReserved(Reg))
        return false;
    } else {
      if (MO.isDead())
        continue;
      for (const MachineInstr &Use : MRI.use_nodbg_instructions(Reg)) {
        if (&Use != this)
          // This def has a non-debug use. Don't delete the instruction!
          return false;
      }
    }
  }

  // Technically speaking inline asm without side effects and no defs can still
  // be deleted. But there is so much bad inline asm code out there, we should
  // let them be.
  if (isInlineAsm())
    return false;

  // FIXME: See issue #105950 for why LIFETIME markers are considered dead here.
  if (isLifetimeMarker())
    return true;

  // If there are no defs with uses, then we call the instruction dead so long
  // as we do not suspect it may have side effects.
  return wouldBeTriviallyDead();
}

static bool MemOperandsHaveAlias(const MachineFrameInfo &MFI,
                                 BatchAAResults *AA, bool UseTBAA,
                                 const MachineMemOperand *MMOa,
                                 const MachineMemOperand *MMOb) {
  // The following interface to AA is fashioned after DAGCombiner::isAlias and
  // operates with MachineMemOperand offset with some important assumptions:
  //   - LLVM fundamentally assumes flat address spaces.
  //   - MachineMemOperand offset can *only* result from legalization and
  //     cannot affect queries other than the trivial case of overlap checking.
  //   - These offsets never wrap and never step outside of allocated objects.
  //   - There should never be any negative offsets here.
  //
  // FIXME: Modify API to hide this math from "user"
  // Even before we go to AA we can reason locally about some memory objects. It
  // can save compile time, and possibly catch some corner cases not currently
  // covered.

  int64_t OffsetA = MMOa->getOffset();
  int64_t OffsetB = MMOb->getOffset();
  int64_t MinOffset = std::min(OffsetA, OffsetB);

  LocationSize WidthA = MMOa->getSize();
  LocationSize WidthB = MMOb->getSize();
  bool KnownWidthA = WidthA.hasValue();
  bool KnownWidthB = WidthB.hasValue();
  bool BothMMONonScalable = !WidthA.isScalable() && !WidthB.isScalable();

  const Value *ValA = MMOa->getValue();
  const Value *ValB = MMOb->getValue();
  bool SameVal = (ValA && ValB && (ValA == ValB));
  if (!SameVal) {
    const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
    const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
    if (PSVa && ValB && !PSVa->mayAlias(&MFI))
      return false;
    if (PSVb && ValA && !PSVb->mayAlias(&MFI))
      return false;
    if (PSVa && PSVb && (PSVa == PSVb))
      SameVal = true;
  }

  if (SameVal && BothMMONonScalable) {
    if (!KnownWidthA || !KnownWidthB)
      return true;
    int64_t MaxOffset = std::max(OffsetA, OffsetB);
    int64_t LowWidth = (MinOffset == OffsetA)
                           ? WidthA.getValue().getKnownMinValue()
                           : WidthB.getValue().getKnownMinValue();
    return (MinOffset + LowWidth > MaxOffset);
  }

  if (!AA)
    return true;

  if (!ValA || !ValB)
    return true;

  assert((OffsetA >= 0) && "Negative MachineMemOperand offset");
  assert((OffsetB >= 0) && "Negative MachineMemOperand offset");

  // If a scalable LocationSize has a non-zero offset, Width + Offset does not
  // currently work.
  if ((WidthA.isScalable() && OffsetA > 0) ||
      (WidthB.isScalable() && OffsetB > 0))
    return true;

  int64_t OverlapA =
      KnownWidthA ? WidthA.getValue().getKnownMinValue() + OffsetA - MinOffset
                  : MemoryLocation::UnknownSize;
  int64_t OverlapB =
      KnownWidthB ? WidthB.getValue().getKnownMinValue() + OffsetB - MinOffset
                  : MemoryLocation::UnknownSize;

  LocationSize LocA = (WidthA.isScalable() || !KnownWidthA)
                          ? WidthA
                          : LocationSize::precise(OverlapA);
  LocationSize LocB = (WidthB.isScalable() || !KnownWidthB)
                          ? WidthB
                          : LocationSize::precise(OverlapB);

  return !AA->isNoAlias(
      MemoryLocation(ValA, LocA, UseTBAA ? MMOa->getAAInfo() : AAMDNodes()),
      MemoryLocation(ValB, LocB, UseTBAA ? MMOb->getAAInfo() : AAMDNodes()));
}

bool MachineInstr::mayAlias(BatchAAResults *AA, const MachineInstr &Other,
                            bool UseTBAA) const {
  const MachineFunction *MF = getMF();
  const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
  const MachineFrameInfo &MFI = MF->getFrameInfo();

  // Exclude call instructions, which may alter memory but cannot be handled
  // by this function.
  if (isCall() || Other.isCall())
    return true;

  // If neither instruction stores to memory, they can't alias in any
  // meaningful way, even if they read from the same address.
  if (!mayStore() && !Other.mayStore())
    return false;

  // Both instructions must be memory operations to be able to alias.
  if (!mayLoadOrStore() || !Other.mayLoadOrStore())
    return false;

  // Let the target decide if memory accesses cannot possibly overlap.
  if (TII->areMemAccessesTriviallyDisjoint(*this, Other))
    return false;

  // Memory operations without memory operands may access anything. Be
  // conservative and assume `MayAlias`.
  if (memoperands_empty() || Other.memoperands_empty())
    return true;

  // Skip if there are too many memory operands.
  auto NumChecks = getNumMemOperands() * Other.getNumMemOperands();
  if (NumChecks > TII->getMemOperandAACheckLimit())
    return true;

  // Check each pair of memory operands from both instructions; the two
  // instructions are known not to alias only if none of the pairs may alias.
  for (auto *MMOa : memoperands())
    for (auto *MMOb : Other.memoperands())
      if (MemOperandsHaveAlias(MFI, AA, UseTBAA, MMOa, MMOb))
        return true;

  return false;
}

bool MachineInstr::mayAlias(AAResults *AA, const MachineInstr &Other,
                            bool UseTBAA) const {
  if (AA) {
    BatchAAResults BAA(*AA);
    return mayAlias(&BAA, Other, UseTBAA);
  }
  return mayAlias(static_cast<BatchAAResults *>(nullptr), Other, UseTBAA);
}
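
// Example query pattern: wrapping AAResults in a BatchAAResults lets a pass
// issue many pairwise queries against one shared cache (sketch):
//
//   BatchAAResults BAA(*AA);
//   if (!LoadMI->mayAlias(&BAA, *StoreMI, /*UseTBAA=*/true)) {
//     // The accesses are provably disjoint; reordering is allowed.
//   }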

/// hasOrderedMemoryRef - Return true if this instruction may have an ordered
/// or volatile memory reference, or if the information describing the memory
/// reference is not available. Return false if it is known to have no ordered
/// memory references.
bool MachineInstr::hasOrderedMemoryRef() const {
  // An instruction known never to access memory won't have a volatile access.
  if (!mayStore() &&
      !mayLoad() &&
      !isCall() &&
      !hasUnmodeledSideEffects())
    return false;

  // Otherwise, if the instruction has no memory reference information,
  // conservatively assume it wasn't preserved.
  if (memoperands_empty())
    return true;

  // Check if any of our memory operands are ordered.
  return llvm::any_of(memoperands(), [](const MachineMemOperand *MMO) {
    return !MMO->isUnordered();
  });
}
1552 /// isDereferenceableInvariantLoad - Return true if this instruction will never
1553 /// trap and is loading from a location whose value is invariant across a run of
1554 /// this function.
1555 bool MachineInstr::isDereferenceableInvariantLoad() const {
1556 // If the instruction doesn't load at all, it isn't an invariant load.
1557 if (!mayLoad())
1558 return false;
1560 // If the instruction has lost its memoperands, conservatively assume that
1561 // it may not be an invariant load.
1562 if (memoperands_empty())
1563 return false;
1565 const MachineFrameInfo &MFI = getParent()->getParent()->getFrameInfo();
1567 for (MachineMemOperand *MMO : memoperands()) {
1568 if (!MMO->isUnordered())
1569 // If the memory operand has ordering side effects, we can't move the
1570 // instruction. Such an instruction is technically an invariant load,
1571 // but the calling code would need to be updated to expect that.
1572 return false;
1573 if (MMO->isStore()) return false;
1574 if (MMO->isInvariant() && MMO->isDereferenceable())
1575 continue;
1577 // A load from a constant PseudoSourceValue is invariant.
1578 if (const PseudoSourceValue *PSV = MMO->getPseudoValue()) {
1579 if (PSV->isConstant(&MFI))
1580 continue;
1583 // Otherwise, conservatively assume the load is not invariant.
1584 return false;
1587 // Everything checks out.
1588 return true;
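// Editorial sketch: a LICM-style pass could use this predicate to collect
// loads that are always safe to hoist out of a loop (names illustrative):
//
//   if (MI.isDereferenceableInvariantLoad())
//     HoistCandidates.push_back(&MI); // never traps, value is fixed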
1591 Register MachineInstr::isConstantValuePHI() const {
1592 if (!isPHI())
1593 return {};
1594 assert(getNumOperands() >= 3 &&
1595 "It's illegal to have a PHI without source operands");
1597 Register Reg = getOperand(1).getReg();
1598 for (unsigned i = 3, e = getNumOperands(); i < e; i += 2)
1599 if (getOperand(i).getReg() != Reg)
1600 return {};
1601 return Reg;
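// Editorial sketch: a PHI qualifies when every incoming value is the same
// register, e.g. in MIR (register names illustrative):
//
//   %res:gpr32 = PHI %x, %bb.1, %x, %bb.2, %x, %bb.3
//
// Here isConstantValuePHI() returns %x; one possible use is folding:
//
//   if (Register R = MI.isConstantValuePHI())
//     MRI.replaceRegWith(MI.getOperand(0).getReg(), R);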
1604 bool MachineInstr::hasUnmodeledSideEffects() const {
1605 if (hasProperty(MCID::UnmodeledSideEffects))
1606 return true;
1607 if (isInlineAsm()) {
1608 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1609 if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1610 return true;
1613 return false;
1616 bool MachineInstr::isLoadFoldBarrier() const {
1617 return mayStore() || isCall() ||
1618 (hasUnmodeledSideEffects() && !isPseudoProbe());
1621 /// allDefsAreDead - Return true if all the defs of this instruction are dead.
1623 bool MachineInstr::allDefsAreDead() const {
1624 for (const MachineOperand &MO : operands()) {
1625 if (!MO.isReg() || MO.isUse())
1626 continue;
1627 if (!MO.isDead())
1628 return false;
1630 return true;
1633 bool MachineInstr::allImplicitDefsAreDead() const {
1634 for (const MachineOperand &MO : implicit_operands()) {
1635 if (!MO.isReg() || MO.isUse())
1636 continue;
1637 if (!MO.isDead())
1638 return false;
1640 return true;
1643 /// copyImplicitOps - Copy implicit register operands from specified
1644 /// instruction to this instruction.
1645 void MachineInstr::copyImplicitOps(MachineFunction &MF,
1646 const MachineInstr &MI) {
1647 for (const MachineOperand &MO :
1648 llvm::drop_begin(MI.operands(), MI.getDesc().getNumOperands()))
1649 if ((MO.isReg() && MO.isImplicit()) || MO.isRegMask())
1650 addOperand(MF, MO);
1653 bool MachineInstr::hasComplexRegisterTies() const {
1654 const MCInstrDesc &MCID = getDesc();
1655 if (MCID.Opcode == TargetOpcode::STATEPOINT)
1656 return true;
1657 for (unsigned I = 0, E = getNumOperands(); I < E; ++I) {
1658 const auto &Operand = getOperand(I);
1659 if (!Operand.isReg() || Operand.isDef())
1660 // Ignore the defined registers as MCID marks only the uses as tied.
1661 continue;
1662 int ExpectedTiedIdx = MCID.getOperandConstraint(I, MCOI::TIED_TO);
1663 int TiedIdx = Operand.isTied() ? int(findTiedOperandIdx(I)) : -1;
1664 if (ExpectedTiedIdx != TiedIdx)
1665 return true;
1667 return false;
1670 LLT MachineInstr::getTypeToPrint(unsigned OpIdx, SmallBitVector &PrintedTypes,
1671 const MachineRegisterInfo &MRI) const {
1672 const MachineOperand &Op = getOperand(OpIdx);
1673 if (!Op.isReg())
1674 return LLT{};
1676 if (isVariadic() || OpIdx >= getNumExplicitOperands())
1677 return MRI.getType(Op.getReg());
1679 auto &OpInfo = getDesc().operands()[OpIdx];
1680 if (!OpInfo.isGenericType())
1681 return MRI.getType(Op.getReg());
1683 if (PrintedTypes[OpInfo.getGenericTypeIndex()])
1684 return LLT{};
1686 LLT TypeToPrint = MRI.getType(Op.getReg());
1687 // Don't mark the type index as printed if it wasn't actually printed:
1688 // another operand with the same type index may have an actual type attached.
1689 if (TypeToPrint.isValid())
1690 PrintedTypes.set(OpInfo.getGenericTypeIndex());
1691 return TypeToPrint;
1694 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1695 LLVM_DUMP_METHOD void MachineInstr::dump() const {
1696 dbgs() << " ";
1697 print(dbgs());
1700 LLVM_DUMP_METHOD void MachineInstr::dumprImpl(
1701 const MachineRegisterInfo &MRI, unsigned Depth, unsigned MaxDepth,
1702 SmallPtrSetImpl<const MachineInstr *> &AlreadySeenInstrs) const {
1703 if (Depth >= MaxDepth)
1704 return;
1705 if (!AlreadySeenInstrs.insert(this).second)
1706 return;
1707 // PadToColumn always inserts at least one space.
1708 // Don't mess up the alignment if we don't want any space.
1709 if (Depth)
1710 fdbgs().PadToColumn(Depth * 2);
1711 print(fdbgs());
1712 for (const MachineOperand &MO : operands()) {
1713 if (!MO.isReg() || MO.isDef())
1714 continue;
1715 Register Reg = MO.getReg();
1716 if (Reg.isPhysical())
1717 continue;
1718 const MachineInstr *NewMI = MRI.getUniqueVRegDef(Reg);
1719 if (NewMI == nullptr)
1720 continue;
1721 NewMI->dumprImpl(MRI, Depth + 1, MaxDepth, AlreadySeenInstrs);
1725 LLVM_DUMP_METHOD void MachineInstr::dumpr(const MachineRegisterInfo &MRI,
1726 unsigned MaxDepth) const {
1727 SmallPtrSet<const MachineInstr *, 16> AlreadySeenInstrs;
1728 dumprImpl(MRI, 0, MaxDepth, AlreadySeenInstrs);
1730 #endif
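// Editorial sketch: these helpers are meant for interactive debugging, for
// example from a gdb session while a pass is paused:
//
//   (gdb) call MI->dump()
//   (gdb) call MI->dumpr(MF->getRegInfo(), 3) // follow vreg defs 3 deep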
1732 void MachineInstr::print(raw_ostream &OS, bool IsStandalone, bool SkipOpers,
1733 bool SkipDebugLoc, bool AddNewLine,
1734 const TargetInstrInfo *TII) const {
1735 const Module *M = nullptr;
1736 const Function *F = nullptr;
1737 if (const MachineFunction *MF = getMFIfAvailable(*this)) {
1738 F = &MF->getFunction();
1739 M = F->getParent();
1740 if (!TII)
1741 TII = MF->getSubtarget().getInstrInfo();
1744 ModuleSlotTracker MST(M);
1745 if (F)
1746 MST.incorporateFunction(*F);
1747 print(OS, MST, IsStandalone, SkipOpers, SkipDebugLoc, AddNewLine, TII);
1750 void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST,
1751 bool IsStandalone, bool SkipOpers, bool SkipDebugLoc,
1752 bool AddNewLine, const TargetInstrInfo *TII) const {
1753 // We can be a bit tidier if we know the MachineFunction.
1754 const TargetRegisterInfo *TRI = nullptr;
1755 const MachineRegisterInfo *MRI = nullptr;
1756 const TargetIntrinsicInfo *IntrinsicInfo = nullptr;
1757 tryToGetTargetInfo(*this, TRI, MRI, IntrinsicInfo, TII);
1759 if (isCFIInstruction())
1760 assert(getNumOperands() == 1 && "Expected 1 operand in CFI instruction");
1762 SmallBitVector PrintedTypes(8);
1763 bool ShouldPrintRegisterTies = IsStandalone || hasComplexRegisterTies();
1764 auto getTiedOperandIdx = [&](unsigned OpIdx) {
1765 if (!ShouldPrintRegisterTies)
1766 return 0U;
1767 const MachineOperand &MO = getOperand(OpIdx);
1768 if (MO.isReg() && MO.isTied() && !MO.isDef())
1769 return findTiedOperandIdx(OpIdx);
1770 return 0U;
1772 unsigned StartOp = 0;
1773 unsigned e = getNumOperands();
1775 // Print explicitly defined operands on the left of an assignment syntax.
1776 while (StartOp < e) {
1777 const MachineOperand &MO = getOperand(StartOp);
1778 if (!MO.isReg() || !MO.isDef() || MO.isImplicit())
1779 break;
1781 if (StartOp != 0)
1782 OS << ", ";
1784 LLT TypeToPrint = MRI ? getTypeToPrint(StartOp, PrintedTypes, *MRI) : LLT{};
1785 unsigned TiedOperandIdx = getTiedOperandIdx(StartOp);
1786 MO.print(OS, MST, TypeToPrint, StartOp, /*PrintDef=*/false, IsStandalone,
1787 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1788 ++StartOp;
1791 if (StartOp != 0)
1792 OS << " = ";
1794 if (getFlag(MachineInstr::FrameSetup))
1795 OS << "frame-setup ";
1796 if (getFlag(MachineInstr::FrameDestroy))
1797 OS << "frame-destroy ";
1798 if (getFlag(MachineInstr::FmNoNans))
1799 OS << "nnan ";
1800 if (getFlag(MachineInstr::FmNoInfs))
1801 OS << "ninf ";
1802 if (getFlag(MachineInstr::FmNsz))
1803 OS << "nsz ";
1804 if (getFlag(MachineInstr::FmArcp))
1805 OS << "arcp ";
1806 if (getFlag(MachineInstr::FmContract))
1807 OS << "contract ";
1808 if (getFlag(MachineInstr::FmAfn))
1809 OS << "afn ";
1810 if (getFlag(MachineInstr::FmReassoc))
1811 OS << "reassoc ";
1812 if (getFlag(MachineInstr::NoUWrap))
1813 OS << "nuw ";
1814 if (getFlag(MachineInstr::NoSWrap))
1815 OS << "nsw ";
1816 if (getFlag(MachineInstr::IsExact))
1817 OS << "exact ";
1818 if (getFlag(MachineInstr::NoFPExcept))
1819 OS << "nofpexcept ";
1820 if (getFlag(MachineInstr::NoMerge))
1821 OS << "nomerge ";
1822 if (getFlag(MachineInstr::NonNeg))
1823 OS << "nneg ";
1824 if (getFlag(MachineInstr::Disjoint))
1825 OS << "disjoint ";
1826 if (getFlag(MachineInstr::SameSign))
1827 OS << "samesign ";
1829 // Print the opcode name.
1830 if (TII)
1831 OS << TII->getName(getOpcode());
1832 else
1833 OS << "UNKNOWN";
1835 if (SkipOpers)
1836 return;
1838 // Print the rest of the operands.
1839 bool FirstOp = true;
1840 unsigned AsmDescOp = ~0u;
1841 unsigned AsmOpCount = 0;
1843 if (isInlineAsm() && e >= InlineAsm::MIOp_FirstOperand) {
1844 // Print asm string.
1845 OS << " ";
1846 const unsigned OpIdx = InlineAsm::MIOp_AsmString;
1847 LLT TypeToPrint = MRI ? getTypeToPrint(OpIdx, PrintedTypes, *MRI) : LLT{};
1848 unsigned TiedOperandIdx = getTiedOperandIdx(OpIdx);
1849 getOperand(OpIdx).print(OS, MST, TypeToPrint, OpIdx, /*PrintDef=*/true, IsStandalone,
1850 ShouldPrintRegisterTies, TiedOperandIdx, TRI,
1851 IntrinsicInfo);
1853 // Print HasSideEffects, MayLoad, MayStore, IsAlignStack
1854 unsigned ExtraInfo = getOperand(InlineAsm::MIOp_ExtraInfo).getImm();
1855 if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
1856 OS << " [sideeffect]";
1857 if (ExtraInfo & InlineAsm::Extra_MayLoad)
1858 OS << " [mayload]";
1859 if (ExtraInfo & InlineAsm::Extra_MayStore)
1860 OS << " [maystore]";
1861 if (ExtraInfo & InlineAsm::Extra_IsConvergent)
1862 OS << " [isconvergent]";
1863 if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
1864 OS << " [alignstack]";
1865 if (getInlineAsmDialect() == InlineAsm::AD_ATT)
1866 OS << " [attdialect]";
1867 if (getInlineAsmDialect() == InlineAsm::AD_Intel)
1868 OS << " [inteldialect]";
1870 StartOp = AsmDescOp = InlineAsm::MIOp_FirstOperand;
1871 FirstOp = false;
1874 for (unsigned i = StartOp, e = getNumOperands(); i != e; ++i) {
1875 const MachineOperand &MO = getOperand(i);
1877 if (FirstOp) FirstOp = false; else OS << ",";
1878 OS << " ";
1880 if (isDebugValueLike() && MO.isMetadata()) {
1881 // Pretty print DBG_VALUE* instructions.
1882 auto *DIV = dyn_cast<DILocalVariable>(MO.getMetadata());
1883 if (DIV && !DIV->getName().empty())
1884 OS << "!\"" << DIV->getName() << '\"';
1885 else {
1886 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1887 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1888 MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1889 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1891 } else if (isDebugLabel() && MO.isMetadata()) {
1892 // Pretty print DBG_LABEL instructions.
1893 auto *DIL = dyn_cast<DILabel>(MO.getMetadata());
1894 if (DIL && !DIL->getName().empty())
1895 OS << "\"" << DIL->getName() << '\"';
1896 else {
1897 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1898 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1899 MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1900 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1902 } else if (i == AsmDescOp && MO.isImm()) {
1903 // Pretty print the inline asm operand descriptor.
1904 OS << '$' << AsmOpCount++;
1905 unsigned Flag = MO.getImm();
1906 const InlineAsm::Flag F(Flag);
1907 OS << ":[";
1908 OS << F.getKindName();
1910 unsigned RCID;
1911 if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID)) {
1912 if (TRI) {
1913 OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID));
1914 } else
1915 OS << ":RC" << RCID;
1918 if (F.isMemKind()) {
1919 const InlineAsm::ConstraintCode MCID = F.getMemoryConstraintID();
1920 OS << ":" << InlineAsm::getMemConstraintName(MCID);
1923 unsigned TiedTo;
1924 if (F.isUseOperandTiedToDef(TiedTo))
1925 OS << " tiedto:$" << TiedTo;
1927 if ((F.isRegDefKind() || F.isRegDefEarlyClobberKind() ||
1928 F.isRegUseKind()) &&
1929 F.getRegMayBeFolded()) {
1930 OS << " foldable";
1933 OS << ']';
1935 // Compute the index of the next operand descriptor.
1936 AsmDescOp += 1 + F.getNumOperandRegisters();
1937 } else {
1938 LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{};
1939 unsigned TiedOperandIdx = getTiedOperandIdx(i);
1940 if (MO.isImm() && isOperandSubregIdx(i))
1941 MachineOperand::printSubRegIdx(OS, MO.getImm(), TRI);
1942 else
1943 MO.print(OS, MST, TypeToPrint, i, /*PrintDef=*/true, IsStandalone,
1944 ShouldPrintRegisterTies, TiedOperandIdx, TRI, IntrinsicInfo);
1948 // Print any optional symbols attached to this instruction as if they were
1949 // operands.
1950 if (MCSymbol *PreInstrSymbol = getPreInstrSymbol()) {
1951 if (!FirstOp) {
1952 FirstOp = false;
1953 OS << ',';
1955 OS << " pre-instr-symbol ";
1956 MachineOperand::printSymbol(OS, *PreInstrSymbol);
1958 if (MCSymbol *PostInstrSymbol = getPostInstrSymbol()) {
1959 if (!FirstOp) {
1960 FirstOp = false;
1961 OS << ',';
1963 OS << " post-instr-symbol ";
1964 MachineOperand::printSymbol(OS, *PostInstrSymbol);
1966 if (MDNode *HeapAllocMarker = getHeapAllocMarker()) {
1967 if (!FirstOp) {
1968 FirstOp = false;
1969 OS << ',';
1971 OS << " heap-alloc-marker ";
1972 HeapAllocMarker->printAsOperand(OS, MST);
1974 if (MDNode *PCSections = getPCSections()) {
1975 if (!FirstOp) {
1976 FirstOp = false;
1977 OS << ',';
1979 OS << " pcsections ";
1980 PCSections->printAsOperand(OS, MST);
1982 if (MDNode *MMRA = getMMRAMetadata()) {
1983 if (!FirstOp) {
1984 FirstOp = false;
1985 OS << ',';
1987 OS << " mmra ";
1988 MMRA->printAsOperand(OS, MST);
1990 if (uint32_t CFIType = getCFIType()) {
1991 if (!FirstOp)
1992 OS << ',';
1993 OS << " cfi-type " << CFIType;
1996 if (DebugInstrNum) {
1997 if (!FirstOp)
1998 OS << ",";
1999 OS << " debug-instr-number " << DebugInstrNum;
2002 if (!SkipDebugLoc) {
2003 if (const DebugLoc &DL = getDebugLoc()) {
2004 if (!FirstOp)
2005 OS << ',';
2006 OS << " debug-location ";
2007 DL->printAsOperand(OS, MST);
2011 if (!memoperands_empty()) {
2012 SmallVector<StringRef, 0> SSNs;
2013 const LLVMContext *Context = nullptr;
2014 std::unique_ptr<LLVMContext> CtxPtr;
2015 const MachineFrameInfo *MFI = nullptr;
2016 if (const MachineFunction *MF = getMFIfAvailable(*this)) {
2017 MFI = &MF->getFrameInfo();
2018 Context = &MF->getFunction().getContext();
2019 } else {
2020 CtxPtr = std::make_unique<LLVMContext>();
2021 Context = CtxPtr.get();
2024 OS << " :: ";
2025 bool NeedComma = false;
2026 for (const MachineMemOperand *Op : memoperands()) {
2027 if (NeedComma)
2028 OS << ", ";
2029 Op->print(OS, MST, SSNs, *Context, MFI, TII);
2030 NeedComma = true;
2034 if (SkipDebugLoc)
2035 return;
2037 bool HaveSemi = false;
2039 // Print debug location information.
2040 if (const DebugLoc &DL = getDebugLoc()) {
2041 if (!HaveSemi) {
2042 OS << ';';
2043 HaveSemi = true;
2045 OS << ' ';
2046 DL.print(OS);
2049 // Print extra comments for DEBUG_VALUE and friends if they are well-formed.
2050 if ((isNonListDebugValue() && getNumOperands() >= 4) ||
2051 (isDebugValueList() && getNumOperands() >= 2) ||
2052 (isDebugRef() && getNumOperands() >= 3)) {
2053 if (getDebugVariableOp().isMetadata()) {
2054 if (!HaveSemi) {
2055 OS << ";";
2056 HaveSemi = true;
2058 auto *DV = getDebugVariable();
2059 OS << " line no:" << DV->getLine();
2060 if (isIndirectDebugValue())
2061 OS << " indirect";
2064 // TODO: DBG_LABEL
2066 if (AddNewLine)
2067 OS << '\n';
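// Editorial sketch: the routine above emits MIR-style text. A representative
// line (opcode, registers, and metadata ids are illustrative):
//
//   %2:gpr32 = nsw ADDWrr %0:gpr32, killed %1:gpr32, debug-location !14
//
// Flags print before the opcode, defs print left of '=', and optional
// trailing sections (symbols, "debug-location", the " :: " memory-operand
// list) follow the operands.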
2070 bool MachineInstr::addRegisterKilled(Register IncomingReg,
2071 const TargetRegisterInfo *RegInfo,
2072 bool AddIfNotFound) {
2073 bool isPhysReg = IncomingReg.isPhysical();
2074 bool hasAliases = isPhysReg &&
2075 MCRegAliasIterator(IncomingReg, RegInfo, false).isValid();
2076 bool Found = false;
2077 SmallVector<unsigned,4> DeadOps;
2078 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
2079 MachineOperand &MO = getOperand(i);
2080 if (!MO.isReg() || !MO.isUse() || MO.isUndef())
2081 continue;
2083 // DEBUG_VALUE nodes do not contribute to code generation and should
2084 // always be ignored. Failure to do so may result in trying to modify
2085 // KILL flags on DEBUG_VALUE nodes.
2086 if (MO.isDebug())
2087 continue;
2089 Register Reg = MO.getReg();
2090 if (!Reg)
2091 continue;
2093 if (Reg == IncomingReg) {
2094 if (!Found) {
2095 if (MO.isKill())
2096 // The register is already marked kill.
2097 return true;
2098 if (isPhysReg && isRegTiedToDefOperand(i))
2099 // Two-address uses of physregs must not be marked kill.
2100 return true;
2101 MO.setIsKill();
2102 Found = true;
2104 } else if (hasAliases && MO.isKill() && Reg.isPhysical()) {
2105 // A super-register kill already exists.
2106 if (RegInfo->isSuperRegister(IncomingReg, Reg))
2107 return true;
2108 if (RegInfo->isSubRegister(IncomingReg, Reg))
2109 DeadOps.push_back(i);
2113 // Trim unneeded kill operands.
2114 while (!DeadOps.empty()) {
2115 unsigned OpIdx = DeadOps.back();
2116 if (getOperand(OpIdx).isImplicit() &&
2117 (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
2118 removeOperand(OpIdx);
2119 else
2120 getOperand(OpIdx).setIsKill(false);
2121 DeadOps.pop_back();
2124 // If not found, this means an alias of one of the operands is killed. Add a
2125 // new implicit operand if required.
2126 if (!Found && AddIfNotFound) {
2127 addOperand(MachineOperand::CreateReg(IncomingReg,
2128 false /*IsDef*/,
2129 true /*IsImp*/,
2130 true /*IsKill*/));
2131 return true;
2133 return Found;
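// Editorial sketch: typical use when a live range is shrunk so that `MI`
// becomes the last use of a register (TRI from the subtarget):
//
//   MI->addRegisterKilled(Reg, TRI, /*AddIfNotFound=*/true);
//
// with clearRegisterKills() below as the inverse when a later use appears.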
2136 void MachineInstr::clearRegisterKills(Register Reg,
2137 const TargetRegisterInfo *RegInfo) {
2138 if (!Reg.isPhysical())
2139 RegInfo = nullptr;
2140 for (MachineOperand &MO : operands()) {
2141 if (!MO.isReg() || !MO.isUse() || !MO.isKill())
2142 continue;
2143 Register OpReg = MO.getReg();
2144 if ((RegInfo && RegInfo->regsOverlap(Reg, OpReg)) || Reg == OpReg)
2145 MO.setIsKill(false);
2149 bool MachineInstr::addRegisterDead(Register Reg,
2150 const TargetRegisterInfo *RegInfo,
2151 bool AddIfNotFound) {
2152 bool isPhysReg = Reg.isPhysical();
2153 bool hasAliases = isPhysReg &&
2154 MCRegAliasIterator(Reg, RegInfo, false).isValid();
2155 bool Found = false;
2156 SmallVector<unsigned,4> DeadOps;
2157 for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
2158 MachineOperand &MO = getOperand(i);
2159 if (!MO.isReg() || !MO.isDef())
2160 continue;
2161 Register MOReg = MO.getReg();
2162 if (!MOReg)
2163 continue;
2165 if (MOReg == Reg) {
2166 MO.setIsDead();
2167 Found = true;
2168 } else if (hasAliases && MO.isDead() && MOReg.isPhysical()) {
2169 // There exists a super-register that's marked dead.
2170 if (RegInfo->isSuperRegister(Reg, MOReg))
2171 return true;
2172 if (RegInfo->isSubRegister(Reg, MOReg))
2173 DeadOps.push_back(i);
2177 // Trim unneeded dead operands.
2178 while (!DeadOps.empty()) {
2179 unsigned OpIdx = DeadOps.back();
2180 if (getOperand(OpIdx).isImplicit() &&
2181 (!isInlineAsm() || findInlineAsmFlagIdx(OpIdx) < 0))
2182 removeOperand(OpIdx);
2183 else
2184 getOperand(OpIdx).setIsDead(false);
2185 DeadOps.pop_back();
2188 // If not found, this means an alias of one of the operands is dead. Add a
2189 // new implicit operand if required.
2190 if (Found || !AddIfNotFound)
2191 return Found;
2193 addOperand(MachineOperand::CreateReg(Reg,
2194 true /*IsDef*/,
2195 true /*IsImp*/,
2196 false /*IsKill*/,
2197 true /*IsDead*/));
2198 return true;
2201 void MachineInstr::clearRegisterDeads(Register Reg) {
2202 for (MachineOperand &MO : all_defs())
2203 if (MO.getReg() == Reg)
2204 MO.setIsDead(false);
2207 void MachineInstr::setRegisterDefReadUndef(Register Reg, bool IsUndef) {
2208 for (MachineOperand &MO : all_defs())
2209 if (MO.getReg() == Reg && MO.getSubReg() != 0)
2210 MO.setIsUndef(IsUndef);
2213 void MachineInstr::addRegisterDefined(Register Reg,
2214 const TargetRegisterInfo *RegInfo) {
2215 if (Reg.isPhysical()) {
2216 MachineOperand *MO = findRegisterDefOperand(Reg, RegInfo, false, false);
2217 if (MO)
2218 return;
2219 } else {
2220 for (const MachineOperand &MO : all_defs()) {
2221 if (MO.getReg() == Reg && MO.getSubReg() == 0)
2222 return;
2225 addOperand(MachineOperand::CreateReg(Reg,
2226 true /*IsDef*/,
2227 true /*IsImp*/));
2230 void MachineInstr::setPhysRegsDeadExcept(ArrayRef<Register> UsedRegs,
2231 const TargetRegisterInfo &TRI) {
2232 bool HasRegMask = false;
2233 for (MachineOperand &MO : operands()) {
2234 if (MO.isRegMask()) {
2235 HasRegMask = true;
2236 continue;
2238 if (!MO.isReg() || !MO.isDef()) continue;
2239 Register Reg = MO.getReg();
2240 if (!Reg.isPhysical())
2241 continue;
2242 // If there are no uses, including partial uses, the def is dead.
2243 if (llvm::none_of(UsedRegs,
2244 [&](MCRegister Use) { return TRI.regsOverlap(Use, Reg); }))
2245 MO.setIsDead();
2248 // This is a call with a register mask operand. Registers clobbered through
2249 // the mask are implicitly dead, so add explicit defs for those that must stay live.
2250 if (HasRegMask)
2251 for (const Register &UsedReg : UsedRegs)
2252 addRegisterDefined(UsedReg, &TRI);
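// Editorial sketch: a fast-regalloc-style selector might use this after
// lowering a call, keeping only the registers that carry return values
// (`ReturnRegs` is a hypothetical list of such registers):
//
//   CallMI->setPhysRegsDeadExcept(ReturnRegs, *TRI);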
2255 unsigned
2256 MachineInstrExpressionTrait::getHashValue(const MachineInstr* const &MI) {
2257 // Build up a buffer of hash code components.
2258 SmallVector<size_t, 16> HashComponents;
2259 HashComponents.reserve(MI->getNumOperands() + 1);
2260 HashComponents.push_back(MI->getOpcode());
2261 for (const MachineOperand &MO : MI->operands()) {
2262 if (MO.isReg() && MO.isDef() && MO.getReg().isVirtual())
2263 continue; // Skip virtual register defs.
2265 HashComponents.push_back(hash_value(MO));
2267 return hash_combine_range(HashComponents.begin(), HashComponents.end());
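// Editorial sketch: this trait lets clients key hash tables on an
// instruction's "expression" (opcode plus non-virtual-def operands) so that
// identical computations collide, e.g.:
//
//   DenseMap<MachineInstr *, unsigned, MachineInstrExpressionTrait> Exprs;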
2270 const MDNode *MachineInstr::getLocCookieMD() const {
2271 // Find the source location cookie.
2272 const MDNode *LocMD = nullptr;
2273 for (unsigned i = getNumOperands(); i != 0; --i) {
2274 if (getOperand(i-1).isMetadata() &&
2275 (LocMD = getOperand(i-1).getMetadata()) &&
2276 LocMD->getNumOperands() != 0) {
2277 if (mdconst::hasa<ConstantInt>(LocMD->getOperand(0)))
2278 return LocMD;
2282 return nullptr;
2285 void MachineInstr::emitInlineAsmError(const Twine &Msg) const {
2286 assert(isInlineAsm());
2287 const MDNode *LocMD = getLocCookieMD();
2288 uint64_t LocCookie =
2289 LocMD
2290 ? mdconst::extract<ConstantInt>(LocMD->getOperand(0))->getZExtValue()
2291 : 0;
2292 LLVMContext &Ctx = getMF()->getFunction().getContext();
2293 Ctx.diagnose(DiagnosticInfoInlineAsm(LocCookie, Msg));
2296 void MachineInstr::emitGenericError(const Twine &Msg) const {
2297 const Function &Fn = getMF()->getFunction();
2298 Fn.getContext().diagnose(
2299 DiagnosticInfoGenericWithLoc(Msg, Fn, getDebugLoc()));
2302 MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
2303 const MCInstrDesc &MCID, bool IsIndirect,
2304 Register Reg, const MDNode *Variable,
2305 const MDNode *Expr) {
2306 assert(isa<DILocalVariable>(Variable) && "not a variable");
2307 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2308 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2309 "Expected inlined-at fields to agree");
2310 auto MIB = BuildMI(MF, DL, MCID).addReg(Reg);
2311 if (IsIndirect)
2312 MIB.addImm(0U);
2313 else
2314 MIB.addReg(0U);
2315 return MIB.addMetadata(Variable).addMetadata(Expr);
2318 MachineInstrBuilder llvm::BuildMI(MachineFunction &MF, const DebugLoc &DL,
2319 const MCInstrDesc &MCID, bool IsIndirect,
2320 ArrayRef<MachineOperand> DebugOps,
2321 const MDNode *Variable, const MDNode *Expr) {
2322 assert(isa<DILocalVariable>(Variable) && "not a variable");
2323 assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
2324 assert(cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(DL) &&
2325 "Expected inlined-at fields to agree");
2326 if (MCID.Opcode == TargetOpcode::DBG_VALUE) {
2327 assert(DebugOps.size() == 1 &&
2328 "DBG_VALUE must contain exactly one debug operand");
2329 MachineOperand DebugOp = DebugOps[0];
2330 if (DebugOp.isReg())
2331 return BuildMI(MF, DL, MCID, IsIndirect, DebugOp.getReg(), Variable,
2332 Expr);
2334 auto MIB = BuildMI(MF, DL, MCID).add(DebugOp);
2335 if (IsIndirect)
2336 MIB.addImm(0U);
2337 else
2338 MIB.addReg(0U);
2339 return MIB.addMetadata(Variable).addMetadata(Expr);
2342 auto MIB = BuildMI(MF, DL, MCID);
2343 MIB.addMetadata(Variable).addMetadata(Expr);
2344 for (const MachineOperand &DebugOp : DebugOps)
2345 if (DebugOp.isReg())
2346 MIB.addReg(DebugOp.getReg());
2347 else
2348 MIB.add(DebugOp);
2349 return MIB;
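// Editorial sketch: building a plain register-valued DBG_VALUE via the
// overloads above (Var/Expr are DILocalVariable/DIExpression nodes, and
// DL's inlined-at scope must agree with Var per the asserts):
//
//   BuildMI(MF, DL, TII->get(TargetOpcode::DBG_VALUE),
//           /*IsIndirect=*/false, ValReg, Var, Expr);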
2352 MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
2353 MachineBasicBlock::iterator I,
2354 const DebugLoc &DL, const MCInstrDesc &MCID,
2355 bool IsIndirect, Register Reg,
2356 const MDNode *Variable, const MDNode *Expr) {
2357 MachineFunction &MF = *BB.getParent();
2358 MachineInstr *MI = BuildMI(MF, DL, MCID, IsIndirect, Reg, Variable, Expr);
2359 BB.insert(I, MI);
2360 return MachineInstrBuilder(MF, MI);
2363 MachineInstrBuilder llvm::BuildMI(MachineBasicBlock &BB,
2364 MachineBasicBlock::iterator I,
2365 const DebugLoc &DL, const MCInstrDesc &MCID,
2366 bool IsIndirect,
2367 ArrayRef<MachineOperand> DebugOps,
2368 const MDNode *Variable, const MDNode *Expr) {
2369 MachineFunction &MF = *BB.getParent();
2370 MachineInstr *MI =
2371 BuildMI(MF, DL, MCID, IsIndirect, DebugOps, Variable, Expr);
2372 BB.insert(I, MI);
2373 return MachineInstrBuilder(MF, *MI);
2376 /// Compute the new DIExpression to use with a DBG_VALUE for a spill slot.
2377 /// This prepends DW_OP_deref when spilling an indirect DBG_VALUE.
2378 static const DIExpression *computeExprForSpill(
2379 const MachineInstr &MI,
2380 const SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2381 assert(MI.getDebugVariable()->isValidLocationForIntrinsic(MI.getDebugLoc()) &&
2382 "Expected inlined-at fields to agree");
2384 const DIExpression *Expr = MI.getDebugExpression();
2385 if (MI.isIndirectDebugValue()) {
2386 assert(MI.getDebugOffset().getImm() == 0 &&
2387 "DBG_VALUE with nonzero offset");
2388 Expr = DIExpression::prepend(Expr, DIExpression::DerefBefore);
2389 } else if (MI.isDebugValueList()) {
2390 // We will replace the spilled register with a frame index, so
2391 // immediately deref all references to the spilled register.
2392 std::array<uint64_t, 1> Ops{{dwarf::DW_OP_deref}};
2393 for (const MachineOperand *Op : SpilledOperands) {
2394 unsigned OpIdx = MI.getDebugOperandIndex(Op);
2395 Expr = DIExpression::appendOpsToArg(Expr, Ops, OpIdx);
2398 return Expr;
2400 static const DIExpression *computeExprForSpill(const MachineInstr &MI,
2401 Register SpillReg) {
2402 assert(MI.hasDebugOperandForReg(SpillReg) && "Spill Reg is not used in MI.");
2403 SmallVector<const MachineOperand *> SpillOperands;
2404 for (const MachineOperand &Op : MI.getDebugOperandsForReg(SpillReg))
2405 SpillOperands.push_back(&Op);
2406 return computeExprForSpill(MI, SpillOperands);
2409 MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
2410 MachineBasicBlock::iterator I,
2411 const MachineInstr &Orig,
2412 int FrameIndex, Register SpillReg) {
2413 assert(!Orig.isDebugRef() &&
2414 "DBG_INSTR_REF should not reference a virtual register.");
2415 const DIExpression *Expr = computeExprForSpill(Orig, SpillReg);
2416 MachineInstrBuilder NewMI =
2417 BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc());
2418 // Non-Variadic Operands: Location, Offset, Variable, Expression
2419 // Variadic Operands: Variable, Expression, Locations...
2420 if (Orig.isNonListDebugValue())
2421 NewMI.addFrameIndex(FrameIndex).addImm(0U);
2422 NewMI.addMetadata(Orig.getDebugVariable()).addMetadata(Expr);
2423 if (Orig.isDebugValueList()) {
2424 for (const MachineOperand &Op : Orig.debug_operands())
2425 if (Op.isReg() && Op.getReg() == SpillReg)
2426 NewMI.addFrameIndex(FrameIndex);
2427 else
2428 NewMI.add(MachineOperand(Op));
2430 return NewMI;
2432 MachineInstr *llvm::buildDbgValueForSpill(
2433 MachineBasicBlock &BB, MachineBasicBlock::iterator I,
2434 const MachineInstr &Orig, int FrameIndex,
2435 const SmallVectorImpl<const MachineOperand *> &SpilledOperands) {
2436 const DIExpression *Expr = computeExprForSpill(Orig, SpilledOperands);
2437 MachineInstrBuilder NewMI =
2438 BuildMI(BB, I, Orig.getDebugLoc(), Orig.getDesc());
2439 // Non-Variadic Operands: Location, Offset, Variable, Expression
2440 // Variadic Operands: Variable, Expression, Locations...
2441 if (Orig.isNonListDebugValue())
2442 NewMI.addFrameIndex(FrameIndex).addImm(0U);
2443 NewMI.addMetadata(Orig.getDebugVariable()).addMetadata(Expr);
2444 if (Orig.isDebugValueList()) {
2445 for (const MachineOperand &Op : Orig.debug_operands())
2446 if (is_contained(SpilledOperands, &Op))
2447 NewMI.addFrameIndex(FrameIndex);
2448 else
2449 NewMI.add(MachineOperand(Op));
2451 return NewMI;
2454 void llvm::updateDbgValueForSpill(MachineInstr &Orig, int FrameIndex,
2455 Register Reg) {
2456 const DIExpression *Expr = computeExprForSpill(Orig, Reg);
2457 if (Orig.isNonListDebugValue())
2458 Orig.getDebugOffset().ChangeToImmediate(0U);
2459 for (MachineOperand &Op : Orig.getDebugOperandsForReg(Reg))
2460 Op.ChangeToFrameIndex(FrameIndex);
2461 Orig.getDebugExpressionOp().setMetadata(Expr);
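// Editorial sketch: when a register allocator spills Reg to frame index FI,
// a DBG_VALUE tracking Reg can be retargeted at the stack slot either by
// inserting a fresh instruction or by rewriting in place:
//
//   buildDbgValueForSpill(*MBB, InsertPt, *DbgMI, FI, Reg); // new copy
//   updateDbgValueForSpill(*DbgMI, FI, Reg);                // in place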
2464 void MachineInstr::collectDebugValues(
2465 SmallVectorImpl<MachineInstr *> &DbgValues) {
2466 MachineInstr &MI = *this;
2467 if (!MI.getOperand(0).isReg())
2468 return;
2470 MachineBasicBlock::iterator DI = MI; ++DI;
2471 for (MachineBasicBlock::iterator DE = MI.getParent()->end();
2472 DI != DE; ++DI) {
2473 if (!DI->isDebugValue())
2474 return;
2475 if (DI->hasDebugOperandForReg(MI.getOperand(0).getReg()))
2476 DbgValues.push_back(&*DI);
2480 void MachineInstr::changeDebugValuesDefReg(Register Reg) {
2481 // Collect matching debug values.
2482 SmallVector<MachineInstr *, 2> DbgValues;
2484 if (!getOperand(0).isReg())
2485 return;
2487 Register DefReg = getOperand(0).getReg();
2488 auto *MRI = getRegInfo();
2489 for (auto &MO : MRI->use_operands(DefReg)) {
2490 auto *DI = MO.getParent();
2491 if (!DI->isDebugValue())
2492 continue;
2493 if (DI->hasDebugOperandForReg(DefReg)) {
2494 DbgValues.push_back(DI);
2498 // Propagate Reg to debug value instructions.
2499 for (auto *DBI : DbgValues)
2500 for (MachineOperand &Op : DBI->getDebugOperandsForReg(DefReg))
2501 Op.setReg(Reg);
2504 using MMOList = SmallVector<const MachineMemOperand *, 2>;
2506 static LocationSize getSpillSlotSize(const MMOList &Accesses,
2507 const MachineFrameInfo &MFI) {
2508 uint64_t Size = 0;
2509 for (const auto *A : Accesses) {
2510 if (MFI.isSpillSlotObjectIndex(
2511 cast<FixedStackPseudoSourceValue>(A->getPseudoValue())
2512 ->getFrameIndex())) {
2513 LocationSize S = A->getSize();
2514 if (!S.hasValue())
2515 return LocationSize::beforeOrAfterPointer();
2516 Size += S.getValue();
2519 return Size;
2522 std::optional<LocationSize>
2523 MachineInstr::getSpillSize(const TargetInstrInfo *TII) const {
2524 int FI;
2525 if (TII->isStoreToStackSlotPostFE(*this, FI)) {
2526 const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2527 if (MFI.isSpillSlotObjectIndex(FI))
2528 return (*memoperands_begin())->getSize();
2530 return std::nullopt;
2533 std::optional<LocationSize>
2534 MachineInstr::getFoldedSpillSize(const TargetInstrInfo *TII) const {
2535 MMOList Accesses;
2536 if (TII->hasStoreToStackSlot(*this, Accesses))
2537 return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2538 return std::nullopt;
2541 std::optional<LocationSize>
2542 MachineInstr::getRestoreSize(const TargetInstrInfo *TII) const {
2543 int FI;
2544 if (TII->isLoadFromStackSlotPostFE(*this, FI)) {
2545 const MachineFrameInfo &MFI = getMF()->getFrameInfo();
2546 if (MFI.isSpillSlotObjectIndex(FI))
2547 return (*memoperands_begin())->getSize();
2549 return std::nullopt;
2552 std::optional<LocationSize>
2553 MachineInstr::getFoldedRestoreSize(const TargetInstrInfo *TII) const {
2554 MMOList Accesses;
2555 if (TII->hasLoadFromStackSlot(*this, Accesses))
2556 return getSpillSlotSize(Accesses, getMF()->getFrameInfo());
2557 return std::nullopt;
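// Editorial sketch: these accessors feed spill/reload diagnostics; a pass
// might accumulate byte counts like this (names illustrative):
//
//   if (auto Sz = MI.getSpillSize(TII))
//     if (Sz->hasValue())
//       SpilledBytes += Sz->getValue();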
2560 unsigned MachineInstr::getDebugInstrNum() {
2561 if (DebugInstrNum == 0)
2562 DebugInstrNum = getParent()->getParent()->getNewDebugInstrNum();
2563 return DebugInstrNum;
2566 unsigned MachineInstr::getDebugInstrNum(MachineFunction &MF) {
2567 if (DebugInstrNum == 0)
2568 DebugInstrNum = MF.getNewDebugInstrNum();
2569 return DebugInstrNum;
2572 std::tuple<LLT, LLT> MachineInstr::getFirst2LLTs() const {
2573 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2574 getRegInfo()->getType(getOperand(1).getReg()));
2577 std::tuple<LLT, LLT, LLT> MachineInstr::getFirst3LLTs() const {
2578 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2579 getRegInfo()->getType(getOperand(1).getReg()),
2580 getRegInfo()->getType(getOperand(2).getReg()));
2583 std::tuple<LLT, LLT, LLT, LLT> MachineInstr::getFirst4LLTs() const {
2584 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2585 getRegInfo()->getType(getOperand(1).getReg()),
2586 getRegInfo()->getType(getOperand(2).getReg()),
2587 getRegInfo()->getType(getOperand(3).getReg()));
2590 std::tuple<LLT, LLT, LLT, LLT, LLT> MachineInstr::getFirst5LLTs() const {
2591 return std::tuple(getRegInfo()->getType(getOperand(0).getReg()),
2592 getRegInfo()->getType(getOperand(1).getReg()),
2593 getRegInfo()->getType(getOperand(2).getReg()),
2594 getRegInfo()->getType(getOperand(3).getReg()),
2595 getRegInfo()->getType(getOperand(4).getReg()));
2598 std::tuple<Register, LLT, Register, LLT>
2599 MachineInstr::getFirst2RegLLTs() const {
2600 Register Reg0 = getOperand(0).getReg();
2601 Register Reg1 = getOperand(1).getReg();
2602 return std::tuple(Reg0, getRegInfo()->getType(Reg0), Reg1,
2603 getRegInfo()->getType(Reg1));
2606 std::tuple<Register, LLT, Register, LLT, Register, LLT>
2607 MachineInstr::getFirst3RegLLTs() const {
2608 Register Reg0 = getOperand(0).getReg();
2609 Register Reg1 = getOperand(1).getReg();
2610 Register Reg2 = getOperand(2).getReg();
2611 return std::tuple(Reg0, getRegInfo()->getType(Reg0), Reg1,
2612 getRegInfo()->getType(Reg1), Reg2,
2613 getRegInfo()->getType(Reg2));
2616 std::tuple<Register, LLT, Register, LLT, Register, LLT, Register, LLT>
2617 MachineInstr::getFirst4RegLLTs() const {
2618 Register Reg0 = getOperand(0).getReg();
2619 Register Reg1 = getOperand(1).getReg();
2620 Register Reg2 = getOperand(2).getReg();
2621 Register Reg3 = getOperand(3).getReg();
2622 return std::tuple(
2623 Reg0, getRegInfo()->getType(Reg0), Reg1, getRegInfo()->getType(Reg1),
2624 Reg2, getRegInfo()->getType(Reg2), Reg3, getRegInfo()->getType(Reg3));
2627 std::tuple<Register, LLT, Register, LLT, Register, LLT, Register, LLT, Register,
2628 LLT>
2629 MachineInstr::getFirst5RegLLTs() const {
2630 Register Reg0 = getOperand(0).getReg();
2631 Register Reg1 = getOperand(1).getReg();
2632 Register Reg2 = getOperand(2).getReg();
2633 Register Reg3 = getOperand(3).getReg();
2634 Register Reg4 = getOperand(4).getReg();
2635 return std::tuple(
2636 Reg0, getRegInfo()->getType(Reg0), Reg1, getRegInfo()->getType(Reg1),
2637 Reg2, getRegInfo()->getType(Reg2), Reg3, getRegInfo()->getType(Reg3),
2638 Reg4, getRegInfo()->getType(Reg4));
2641 void MachineInstr::insert(mop_iterator InsertBefore,
2642 ArrayRef<MachineOperand> Ops) {
2643 assert(InsertBefore != nullptr && "invalid iterator");
2644 assert(InsertBefore->getParent() == this &&
2645 "iterator points to operand of other inst");
2646 if (Ops.empty())
2647 return;
2649 // Do one pass to untie operands.
2650 SmallDenseMap<unsigned, unsigned> TiedOpIndices;
2651 for (const MachineOperand &MO : operands()) {
2652 if (MO.isReg() && MO.isTied()) {
2653 unsigned OpNo = getOperandNo(&MO);
2654 unsigned TiedTo = findTiedOperandIdx(OpNo);
2655 TiedOpIndices[OpNo] = TiedTo;
2656 untieRegOperand(OpNo);
2660 unsigned OpIdx = getOperandNo(InsertBefore);
2661 unsigned NumOperands = getNumOperands();
2662 unsigned OpsToMove = NumOperands - OpIdx;
2664 SmallVector<MachineOperand> MovingOps;
2665 MovingOps.reserve(OpsToMove);
2667 for (unsigned I = 0; I < OpsToMove; ++I) {
2668 MovingOps.emplace_back(getOperand(OpIdx));
2669 removeOperand(OpIdx);
2671 for (const MachineOperand &MO : Ops)
2672 addOperand(MO);
2673 for (const MachineOperand &OpMoved : MovingOps)
2674 addOperand(OpMoved);
2676 // Re-tie operands.
2677 for (auto [Tie1, Tie2] : TiedOpIndices) {
2678 if (Tie1 >= OpIdx)
2679 Tie1 += Ops.size();
2680 if (Tie2 >= OpIdx)
2681 Tie2 += Ops.size();
2682 tieOperands(Tie1, Tie2);
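// Editorial sketch: inserting new operands before an existing one; tied
// operands on either side of the insertion point are re-tied correctly:
//
//   MachineOperand NewOps[] = {MachineOperand::CreateImm(0)};
//   MI.insert(MI.operands_begin() + 1, NewOps); // before operand #1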
2686 bool MachineInstr::mayFoldInlineAsmRegOp(unsigned OpId) const {
2687 assert(OpId && "expected non-zero operand id");
2688 assert(isInlineAsm() && "should only be used on inline asm");
2690 if (!getOperand(OpId).isReg())
2691 return false;
2693 const MachineOperand &MD = getOperand(OpId - 1);
2694 if (!MD.isImm())
2695 return false;
2697 InlineAsm::Flag F(MD.getImm());
2698 if (F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind())
2699 return F.getRegMayBeFolded();
2700 return false;