//===- MachineFunction.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Collect native machine code information for a function. This allows
// target-specific information about the generated code to be stored with each
// function.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/PseudoSourceValueManager.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/WasmEHFuncInfo.h"
#include "llvm/CodeGen/WinEHFuncInfo.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/ModuleSlotTracker.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/SectionKind.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/DOTGraphTraits.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

#include "LiveDebugValues/LiveDebugValues.h"

using namespace llvm;

#define DEBUG_TYPE "codegen"

static cl::opt<unsigned> AlignAllFunctions(
    "align-all-functions",
    cl::desc("Force the alignment of all functions in log2 format (e.g. 4 "
             "means align on 16B boundaries)."),
    cl::init(0), cl::Hidden);
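// For example, -align-all-functions=4 forces every emitted function onto a
// 16-byte (2^4) boundary.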

static const char *getPropertyName(MachineFunctionProperties::Property Prop) {
  using P = MachineFunctionProperties::Property;

  // clang-format off
  switch(Prop) {
  case P::FailedISel: return "FailedISel";
  case P::IsSSA: return "IsSSA";
  case P::Legalized: return "Legalized";
  case P::NoPHIs: return "NoPHIs";
  case P::NoVRegs: return "NoVRegs";
  case P::RegBankSelected: return "RegBankSelected";
  case P::Selected: return "Selected";
  case P::TracksLiveness: return "TracksLiveness";
  case P::TiedOpsRewritten: return "TiedOpsRewritten";
  case P::FailsVerification: return "FailsVerification";
  case P::TracksDebugUserValues: return "TracksDebugUserValues";
  }
  // clang-format on
  llvm_unreachable("Invalid machine function property");
}

void setUnsafeStackSize(const Function &F, MachineFrameInfo &FrameInfo) {
  if (!F.hasFnAttribute(Attribute::SafeStack))
    return;

  auto *Existing =
      dyn_cast_or_null<MDTuple>(F.getMetadata(LLVMContext::MD_annotation));

  if (!Existing || Existing->getNumOperands() != 2)
    return;

  auto *MetadataName = "unsafe-stack-size";
  if (auto &N = Existing->getOperand(0)) {
    if (N.equalsStr(MetadataName)) {
      if (auto &Op = Existing->getOperand(1)) {
        auto Val = mdconst::extract<ConstantInt>(Op)->getZExtValue();
        FrameInfo.setUnsafeStackSize(Val);
      }
    }
  }
}

// Pin the vtable to this file.
void MachineFunction::Delegate::anchor() {}

void MachineFunctionProperties::print(raw_ostream &OS) const {
  const char *Separator = "";
  for (BitVector::size_type I = 0; I < Properties.size(); ++I) {
    if (!Properties[I])
      continue;
    OS << Separator << getPropertyName(static_cast<Property>(I));
    Separator = ", ";
  }
}

//===----------------------------------------------------------------------===//
// MachineFunction implementation
//===----------------------------------------------------------------------===//

// Out-of-line virtual method.
MachineFunctionInfo::~MachineFunctionInfo() = default;

void ilist_alloc_traits<MachineBasicBlock>::deleteNode(MachineBasicBlock *MBB) {
  MBB->getParent()->deleteMachineBasicBlock(MBB);
}

static inline Align getFnStackAlignment(const TargetSubtargetInfo *STI,
                                        const Function &F) {
  if (auto MA = F.getFnStackAlign())
    return *MA;
  return STI->getFrameLowering()->getStackAlign();
}

MachineFunction::MachineFunction(Function &F, const LLVMTargetMachine &Target,
                                 const TargetSubtargetInfo &STI,
                                 unsigned FunctionNum, MachineModuleInfo &mmi)
    : F(F), Target(Target), STI(&STI), Ctx(mmi.getContext()), MMI(mmi) {
  FunctionNumber = FunctionNum;
  init();
}

void MachineFunction::handleInsertion(MachineInstr &MI) {
  if (TheDelegate)
    TheDelegate->MF_HandleInsertion(MI);
}

void MachineFunction::handleRemoval(MachineInstr &MI) {
  if (TheDelegate)
    TheDelegate->MF_HandleRemoval(MI);
}

void MachineFunction::handleChangeDesc(MachineInstr &MI,
                                       const MCInstrDesc &TID) {
  if (TheDelegate)
    TheDelegate->MF_HandleChangeDesc(MI, TID);
}

void MachineFunction::init() {
  // Assume the function starts in SSA form with correct liveness.
  Properties.set(MachineFunctionProperties::Property::IsSSA);
  Properties.set(MachineFunctionProperties::Property::TracksLiveness);
  if (STI->getRegisterInfo())
    RegInfo = new (Allocator) MachineRegisterInfo(this);
  else
    RegInfo = nullptr;

  MFInfo = nullptr;

  // We can realign the stack if the target supports it and the user hasn't
  // explicitly asked us not to.
  bool CanRealignSP = STI->getFrameLowering()->isStackRealignable() &&
                      !F.hasFnAttribute("no-realign-stack");
  bool ForceRealignSP = F.hasFnAttribute(Attribute::StackAlignment) ||
                        F.hasFnAttribute("stackrealign");
  FrameInfo = new (Allocator) MachineFrameInfo(
      getFnStackAlignment(STI, F), /*StackRealignable=*/CanRealignSP,
      /*ForcedRealign=*/ForceRealignSP && CanRealignSP);

  setUnsafeStackSize(F, *FrameInfo);

  if (F.hasFnAttribute(Attribute::StackAlignment))
    FrameInfo->ensureMaxAlignment(*F.getFnStackAlign());

  ConstantPool = new (Allocator) MachineConstantPool(getDataLayout());
  Alignment = STI->getTargetLowering()->getMinFunctionAlignment();

  // FIXME: Shouldn't use pref alignment if explicit alignment is set on F.
  // FIXME: Use Function::hasOptSize().
  if (!F.hasFnAttribute(Attribute::OptimizeForSize))
    Alignment = std::max(Alignment,
                         STI->getTargetLowering()->getPrefFunctionAlignment());

  // -fsanitize=function and -fsanitize=kcfi instrument indirect function calls
  // to load a type hash before the function label. Ensure functions are aligned
  // by at least 4 to avoid unaligned access, which is especially important for
  // -mno-unaligned-access.
  if (F.hasMetadata(LLVMContext::MD_func_sanitize) ||
      F.getMetadata(LLVMContext::MD_kcfi_type))
    Alignment = std::max(Alignment, Align(4));

  if (AlignAllFunctions)
    Alignment = Align(1ULL << AlignAllFunctions);

  JumpTableInfo = nullptr;

  if (isFuncletEHPersonality(classifyEHPersonality(
          F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
    WinEHInfo = new (Allocator) WinEHFuncInfo();
  }

  if (isScopedEHPersonality(classifyEHPersonality(
          F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
    WasmEHInfo = new (Allocator) WasmEHFuncInfo();
  }

  assert(Target.isCompatibleDataLayout(getDataLayout()) &&
         "Can't create a MachineFunction using a Module with a "
         "Target-incompatible DataLayout attached\n");

  PSVManager = std::make_unique<PseudoSourceValueManager>(getTarget());
}

void MachineFunction::initTargetMachineFunctionInfo(
    const TargetSubtargetInfo &STI) {
  assert(!MFInfo && "MachineFunctionInfo already set");
  MFInfo = Target.createMachineFunctionInfo(Allocator, F, &STI);
}

MachineFunction::~MachineFunction() {
  clear();
}

void MachineFunction::clear() {
  Properties.reset();
  // Don't call destructors on MachineInstr and MachineOperand. All of their
  // memory comes from the BumpPtrAllocator which is about to be purged.
  //
  // Do call MachineBasicBlock destructors, it contains std::vectors.
  for (iterator I = begin(), E = end(); I != E; I = BasicBlocks.erase(I))
    I->Insts.clearAndLeakNodesUnsafely();
  MBBNumbering.clear();

  InstructionRecycler.clear(Allocator);
  OperandRecycler.clear(Allocator);
  BasicBlockRecycler.clear(Allocator);
  CodeViewAnnotations.clear();
  VariableDbgInfos.clear();
  if (RegInfo) {
    RegInfo->~MachineRegisterInfo();
    Allocator.Deallocate(RegInfo);
  }
  if (MFInfo) {
    MFInfo->~MachineFunctionInfo();
    Allocator.Deallocate(MFInfo);
  }

  FrameInfo->~MachineFrameInfo();
  Allocator.Deallocate(FrameInfo);

  ConstantPool->~MachineConstantPool();
  Allocator.Deallocate(ConstantPool);

  if (JumpTableInfo) {
    JumpTableInfo->~MachineJumpTableInfo();
    Allocator.Deallocate(JumpTableInfo);
  }

  if (WinEHInfo) {
    WinEHInfo->~WinEHFuncInfo();
    Allocator.Deallocate(WinEHInfo);
  }

  if (WasmEHInfo) {
    WasmEHInfo->~WasmEHFuncInfo();
    Allocator.Deallocate(WasmEHInfo);
  }
}

const DataLayout &MachineFunction::getDataLayout() const {
  return F.getDataLayout();
}

/// Get the JumpTableInfo for this function.
/// If it does not already exist, allocate one.
MachineJumpTableInfo *MachineFunction::
getOrCreateJumpTableInfo(unsigned EntryKind) {
  if (JumpTableInfo) return JumpTableInfo;

  JumpTableInfo = new (Allocator)
    MachineJumpTableInfo((MachineJumpTableInfo::JTEntryKind)EntryKind);
  return JumpTableInfo;
}

DenormalMode MachineFunction::getDenormalMode(const fltSemantics &FPType) const {
  return F.getDenormalMode(FPType);
}

/// Should we be emitting segmented stack stuff for the function?
bool MachineFunction::shouldSplitStack() const {
  return getFunction().hasFnAttribute("split-stack");
}

[[nodiscard]] unsigned
MachineFunction::addFrameInst(const MCCFIInstruction &Inst) {
  FrameInstructions.push_back(Inst);
  return FrameInstructions.size() - 1;
}

/// This discards all of the MachineBasicBlock numbers and recomputes them.
/// This guarantees that the MBB numbers are sequential, dense, and match the
/// ordering of the blocks within the function. If a specific MachineBasicBlock
/// is specified, only that block and those after it are renumbered.
void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
  if (empty()) { MBBNumbering.clear(); return; }
  MachineFunction::iterator MBBI, E = end();
  if (MBB == nullptr)
    MBBI = begin();
  else
    MBBI = MBB->getIterator();

  // Figure out the block number this should have.
  unsigned BlockNo = 0;
  if (MBBI != begin())
    BlockNo = std::prev(MBBI)->getNumber() + 1;

  for (; MBBI != E; ++MBBI, ++BlockNo) {
    if (MBBI->getNumber() != (int)BlockNo) {
      // Remove use of the old number.
      if (MBBI->getNumber() != -1) {
        assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
               "MBB number mismatch!");
        MBBNumbering[MBBI->getNumber()] = nullptr;
      }

      // If BlockNo is already taken, set that block's number to -1.
      if (MBBNumbering[BlockNo])
        MBBNumbering[BlockNo]->setNumber(-1);

      MBBNumbering[BlockNo] = &*MBBI;
      MBBI->setNumber(BlockNo);
    }
  }

  // Okay, all the blocks are renumbered. If we have compactified the block
  // numbering, shrink MBBNumbering now.
  assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
  MBBNumbering.resize(BlockNo);
}

/// This method iterates over the basic blocks and assigns their IsBeginSection
/// and IsEndSection fields. This must be called after MBB layout is finalized
/// and the SectionID's are assigned to MBBs.
void MachineFunction::assignBeginEndSections() {
  front().setIsBeginSection();
  auto CurrentSectionID = front().getSectionID();
  for (auto MBBI = std::next(begin()), E = end(); MBBI != E; ++MBBI) {
    if (MBBI->getSectionID() == CurrentSectionID)
      continue;
    MBBI->setIsBeginSection();
    std::prev(MBBI)->setIsEndSection();
    CurrentSectionID = MBBI->getSectionID();
  }
  back().setIsEndSection();
}

/// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
MachineInstr *MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
                                                  DebugLoc DL,
                                                  bool NoImplicit) {
  return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
      MachineInstr(*this, MCID, std::move(DL), NoImplicit);
}

/// Create a new MachineInstr which is a copy of the 'Orig' instruction,
/// identical in all ways except the instruction has no parent, prev, or next.
MachineInstr *
MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
  return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
      MachineInstr(*this, *Orig);
}

MachineInstr &MachineFunction::cloneMachineInstrBundle(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
    const MachineInstr &Orig) {
  MachineInstr *FirstClone = nullptr;
  MachineBasicBlock::const_instr_iterator I = Orig.getIterator();
  while (true) {
    MachineInstr *Cloned = CloneMachineInstr(&*I);
    MBB.insert(InsertBefore, Cloned);
    if (FirstClone == nullptr) {
      FirstClone = Cloned;
    } else {
      Cloned->bundleWithPred();
    }

    if (!I->isBundledWithSucc())
      break;
    ++I;
  }
  // Copy over call site info to the cloned instruction if needed. If Orig is in
  // a bundle, copyCallSiteInfo takes care of finding the call instruction in
  // the bundle.
  if (Orig.shouldUpdateCallSiteInfo())
    copyCallSiteInfo(&Orig, FirstClone);
  return *FirstClone;
}

/// Delete the given MachineInstr.
///
/// This function also serves as the MachineInstr destructor - the real
/// ~MachineInstr() destructor must be empty.
void MachineFunction::deleteMachineInstr(MachineInstr *MI) {
  // Verify that the call site info is in a valid state. This assertion should
  // be triggered during the implementation of support for the
  // call site info of a new architecture. If the assertion is triggered,
  // the backtrace will tell you where to insert a call to updateCallSiteInfo().
  assert((!MI->isCandidateForCallSiteEntry() || !CallSitesInfo.contains(MI)) &&
         "Call site info was not updated!");
  // Strip it for parts. The operand array and the MI object itself are
  // independently recyclable.
  if (MI->Operands)
    deallocateOperandArray(MI->CapOperands, MI->Operands);
  // Don't call ~MachineInstr() which must be trivial anyway because
  // ~MachineFunction drops whole lists of MachineInstrs without calling their
  // destructors.
  InstructionRecycler.Deallocate(Allocator, MI);
}

/// Allocate a new MachineBasicBlock. Use this instead of
/// `new MachineBasicBlock'.
MachineBasicBlock *
MachineFunction::CreateMachineBasicBlock(const BasicBlock *BB,
                                         std::optional<UniqueBBID> BBID) {
  MachineBasicBlock *MBB =
      new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
          MachineBasicBlock(*this, BB);
  // Set BBID for `-basic-block-sections=labels` and
  // `-basic-block-sections=list` to allow robust mapping of profiles to basic
  // blocks.
  if (Target.getBBSectionsType() == BasicBlockSection::Labels ||
      Target.Options.BBAddrMap ||
      Target.getBBSectionsType() == BasicBlockSection::List)
    MBB->setBBID(BBID.has_value() ? *BBID : UniqueBBID{NextBBID++, 0});
  return MBB;
}

/// Delete the given MachineBasicBlock.
void MachineFunction::deleteMachineBasicBlock(MachineBasicBlock *MBB) {
  assert(MBB->getParent() == this && "MBB parent mismatch!");
  // Clean up any references to MBB in jump tables before deleting it.
  if (JumpTableInfo)
    JumpTableInfo->RemoveMBBFromJumpTables(MBB);
  MBB->~MachineBasicBlock();
  BasicBlockRecycler.Deallocate(Allocator, MBB);
}

MachineMemOperand *MachineFunction::getMachineMemOperand(
    MachinePointerInfo PtrInfo, MachineMemOperand::Flags F, LocationSize Size,
    Align BaseAlignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
    SyncScope::ID SSID, AtomicOrdering Ordering,
    AtomicOrdering FailureOrdering) {
  assert((!Size.hasValue() ||
          Size.getValue().getKnownMinValue() != ~UINT64_C(0)) &&
         "Unexpected unknown size to be represented using "
         "LocationSize::beforeOrAfter()");
  return new (Allocator)
      MachineMemOperand(PtrInfo, F, Size, BaseAlignment, AAInfo, Ranges, SSID,
                        Ordering, FailureOrdering);
}

MachineMemOperand *MachineFunction::getMachineMemOperand(
    MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy,
    Align base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
    SyncScope::ID SSID, AtomicOrdering Ordering,
    AtomicOrdering FailureOrdering) {
  return new (Allocator)
      MachineMemOperand(PtrInfo, f, MemTy, base_alignment, AAInfo, Ranges, SSID,
                        Ordering, FailureOrdering);
}

MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
                                      const MachinePointerInfo &PtrInfo,
                                      LocationSize Size) {
  assert((!Size.hasValue() ||
          Size.getValue().getKnownMinValue() != ~UINT64_C(0)) &&
         "Unexpected unknown size to be represented using "
         "LocationSize::beforeOrAfter()");
  return new (Allocator)
      MachineMemOperand(PtrInfo, MMO->getFlags(), Size, MMO->getBaseAlign(),
                        AAMDNodes(), nullptr, MMO->getSyncScopeID(),
                        MMO->getSuccessOrdering(), MMO->getFailureOrdering());
}

MachineMemOperand *MachineFunction::getMachineMemOperand(
    const MachineMemOperand *MMO, const MachinePointerInfo &PtrInfo, LLT Ty) {
  return new (Allocator)
      MachineMemOperand(PtrInfo, MMO->getFlags(), Ty, MMO->getBaseAlign(),
                        AAMDNodes(), nullptr, MMO->getSyncScopeID(),
                        MMO->getSuccessOrdering(), MMO->getFailureOrdering());
}

MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
                                      int64_t Offset, LLT Ty) {
  const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();

  // If there is no pointer value, the offset isn't tracked so we need to adjust
  // the base alignment.
  Align Alignment = PtrInfo.V.isNull()
                        ? commonAlignment(MMO->getBaseAlign(), Offset)
                        : MMO->getBaseAlign();

  // Do not preserve ranges, since we don't necessarily know what the high bits
  // are anymore.
  return new (Allocator) MachineMemOperand(
      PtrInfo.getWithOffset(Offset), MMO->getFlags(), Ty, Alignment,
      MMO->getAAInfo(), nullptr, MMO->getSyncScopeID(),
      MMO->getSuccessOrdering(), MMO->getFailureOrdering());
}

MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
                                      const AAMDNodes &AAInfo) {
  MachinePointerInfo MPI = MMO->getValue() ?
             MachinePointerInfo(MMO->getValue(), MMO->getOffset()) :
             MachinePointerInfo(MMO->getPseudoValue(), MMO->getOffset());

  return new (Allocator) MachineMemOperand(
      MPI, MMO->getFlags(), MMO->getSize(), MMO->getBaseAlign(), AAInfo,
      MMO->getRanges(), MMO->getSyncScopeID(), MMO->getSuccessOrdering(),
      MMO->getFailureOrdering());
}

MachineMemOperand *
MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
                                      MachineMemOperand::Flags Flags) {
  return new (Allocator) MachineMemOperand(
      MMO->getPointerInfo(), Flags, MMO->getSize(), MMO->getBaseAlign(),
      MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
      MMO->getSuccessOrdering(), MMO->getFailureOrdering());
}

MachineInstr::ExtraInfo *MachineFunction::createMIExtraInfo(
    ArrayRef<MachineMemOperand *> MMOs, MCSymbol *PreInstrSymbol,
    MCSymbol *PostInstrSymbol, MDNode *HeapAllocMarker, MDNode *PCSections,
    uint32_t CFIType, MDNode *MMRAs) {
  return MachineInstr::ExtraInfo::create(Allocator, MMOs, PreInstrSymbol,
                                         PostInstrSymbol, HeapAllocMarker,
                                         PCSections, CFIType, MMRAs);
}

const char *MachineFunction::createExternalSymbolName(StringRef Name) {
  char *Dest = Allocator.Allocate<char>(Name.size() + 1);
  llvm::copy(Name, Dest);
  Dest[Name.size()] = 0;
  return Dest;
}

uint32_t *MachineFunction::allocateRegMask() {
  unsigned NumRegs = getSubtarget().getRegisterInfo()->getNumRegs();
  unsigned Size = MachineOperand::getRegMaskSize(NumRegs);
  uint32_t *Mask = Allocator.Allocate<uint32_t>(Size);
  memset(Mask, 0, Size * sizeof(Mask[0]));
  return Mask;
}

ArrayRef<int> MachineFunction::allocateShuffleMask(ArrayRef<int> Mask) {
  int *AllocMask = Allocator.Allocate<int>(Mask.size());
  copy(Mask, AllocMask);
  return {AllocMask, Mask.size()};
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MachineFunction::dump() const {
  print(dbgs());
}
#endif

StringRef MachineFunction::getName() const {
  return getFunction().getName();
}

void MachineFunction::print(raw_ostream &OS, const SlotIndexes *Indexes) const {
  OS << "# Machine code for function " << getName() << ": ";
  getProperties().print(OS);
  OS << '\n';

  // Print Frame Information
  FrameInfo->print(*this, OS);

  // Print JumpTable Information
  if (JumpTableInfo)
    JumpTableInfo->print(OS);

  // Print Constant Pool
  ConstantPool->print(OS);

  const TargetRegisterInfo *TRI = getSubtarget().getRegisterInfo();

  if (RegInfo && !RegInfo->livein_empty()) {
    OS << "Function Live Ins: ";
    for (MachineRegisterInfo::livein_iterator
         I = RegInfo->livein_begin(), E = RegInfo->livein_end(); I != E; ++I) {
      OS << printReg(I->first, TRI);
      if (I->second)
        OS << " in " << printReg(I->second, TRI);
      if (std::next(I) != E)
        OS << ", ";
    }
    OS << '\n';
  }

  ModuleSlotTracker MST(getFunction().getParent());
  MST.incorporateFunction(getFunction());
  for (const auto &BB : *this) {
    OS << '\n';
    // If we print the whole function, print it at its most verbose level.
    BB.print(OS, MST, Indexes, /*IsStandalone=*/true);
  }

  OS << "\n# End machine code for function " << getName() << ".\n\n";
}

/// True if this function needs frame moves for debug or exceptions.
bool MachineFunction::needsFrameMoves() const {
  return getMMI().hasDebugInfo() ||
         getTarget().Options.ForceDwarfFrameSection ||
         F.needsUnwindTableEntry();
}

namespace llvm {

  template<>
  struct DOTGraphTraits<const MachineFunction*> : public DefaultDOTGraphTraits {
    DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

    static std::string getGraphName(const MachineFunction *F) {
      return ("CFG for '" + F->getName() + "' function").str();
    }

    std::string getNodeLabel(const MachineBasicBlock *Node,
                             const MachineFunction *Graph) {
      std::string OutStr;
      {
        raw_string_ostream OSS(OutStr);

        if (isSimple()) {
          OSS << printMBBReference(*Node);
          if (const BasicBlock *BB = Node->getBasicBlock())
            OSS << ": " << BB->getName();
        } else
          Node->print(OSS);
      }

      if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());

      // Process string output to make it nicer...
      for (unsigned i = 0; i != OutStr.length(); ++i)
        if (OutStr[i] == '\n') { // Left justify
          OutStr[i] = '\\';
          OutStr.insert(OutStr.begin()+i+1, 'l');
        }
      return OutStr;
    }
  };

} // end namespace llvm

void MachineFunction::viewCFG() const
{
#ifndef NDEBUG
  ViewGraph(this, "mf" + getName());
#else
  errs() << "MachineFunction::viewCFG is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}

void MachineFunction::viewCFGOnly() const
{
#ifndef NDEBUG
  ViewGraph(this, "mf" + getName(), true);
#else
  errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
         << "systems with Graphviz or gv!\n";
#endif // NDEBUG
}

/// Add the specified physical register as a live-in value and
/// create a corresponding virtual register for it.
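/// For example, a target's argument-lowering code might call
/// addLiveIn(X86::EDI, &X86::GR32RegClass) to obtain the virtual register
/// carrying an incoming EDI argument (an illustrative, target-specific use).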
Register MachineFunction::addLiveIn(MCRegister PReg,
                                    const TargetRegisterClass *RC) {
  MachineRegisterInfo &MRI = getRegInfo();
  Register VReg = MRI.getLiveInVirtReg(PReg);
  if (VReg) {
    const TargetRegisterClass *VRegRC = MRI.getRegClass(VReg);
    (void)VRegRC;
    // A physical register can be added several times.
    // Between two calls, the register class of the related virtual register
    // may have been constrained to match some operation constraints.
    // In that case, check that the current register class includes the
    // physical register and is a sub class of the specified RC.
    assert((VRegRC == RC || (VRegRC->contains(PReg) &&
                             RC->hasSubClassEq(VRegRC))) &&
           "Register class mismatch!");
    return VReg;
  }
  VReg = MRI.createVirtualRegister(RC);
  MRI.addLiveIn(PReg, VReg);
  return VReg;
}

/// Return the MCSymbol for the specified non-empty jump table.
/// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
/// normal 'L' label is returned.
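/// For example, on ELF targets (private prefix ".L") jump table 1 of function
/// 3 is named ".LJTI3_1".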
MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
                                        bool isLinkerPrivate) const {
  const DataLayout &DL = getDataLayout();
  assert(JumpTableInfo && "No jump tables");
  assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");

  StringRef Prefix = isLinkerPrivate ? DL.getLinkerPrivateGlobalPrefix()
                                     : DL.getPrivateGlobalPrefix();
  SmallString<60> Name;
  raw_svector_ostream(Name)
      << Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
  return Ctx.getOrCreateSymbol(Name);
}

/// Return a function-local symbol to represent the PIC base.
MCSymbol *MachineFunction::getPICBaseSymbol() const {
  const DataLayout &DL = getDataLayout();
  return Ctx.getOrCreateSymbol(Twine(DL.getPrivateGlobalPrefix()) +
                               Twine(getFunctionNumber()) + "$pb");
}

/// \name Exception Handling
/// \{

LandingPadInfo &
MachineFunction::getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad) {
  unsigned N = LandingPads.size();
  for (unsigned i = 0; i < N; ++i) {
    LandingPadInfo &LP = LandingPads[i];
    if (LP.LandingPadBlock == LandingPad)
      return LP;
  }

  LandingPads.push_back(LandingPadInfo(LandingPad));
  return LandingPads[N];
}

void MachineFunction::addInvoke(MachineBasicBlock *LandingPad,
                                MCSymbol *BeginLabel, MCSymbol *EndLabel) {
  LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
  LP.BeginLabels.push_back(BeginLabel);
  LP.EndLabels.push_back(EndLabel);
}

MCSymbol *MachineFunction::addLandingPad(MachineBasicBlock *LandingPad) {
  MCSymbol *LandingPadLabel = Ctx.createTempSymbol();
  LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
  LP.LandingPadLabel = LandingPadLabel;

  const Instruction *FirstI = LandingPad->getBasicBlock()->getFirstNonPHI();
  if (const auto *LPI = dyn_cast<LandingPadInst>(FirstI)) {
    // If there's no typeid list specified, then "cleanup" is implicit.
    // Otherwise, id 0 is reserved for the cleanup action.
    if (LPI->isCleanup() && LPI->getNumClauses() != 0)
      LP.TypeIds.push_back(0);

    // FIXME: New EH - Add the clauses in reverse order. This isn't 100%
    // correct, but we need to do it this way because of how the DWARF EH
    // emitter processes the clauses.
    for (unsigned I = LPI->getNumClauses(); I != 0; --I) {
      Value *Val = LPI->getClause(I - 1);
      if (LPI->isCatch(I - 1)) {
        LP.TypeIds.push_back(
            getTypeIDFor(dyn_cast<GlobalValue>(Val->stripPointerCasts())));
      } else {
        // Add filters in a list.
        auto *CVal = cast<Constant>(Val);
        SmallVector<unsigned, 4> FilterList;
        for (const Use &U : CVal->operands())
          FilterList.push_back(
              getTypeIDFor(cast<GlobalValue>(U->stripPointerCasts())));

        LP.TypeIds.push_back(getFilterIDFor(FilterList));
      }
    }
  } else if (const auto *CPI = dyn_cast<CatchPadInst>(FirstI)) {
    for (unsigned I = CPI->arg_size(); I != 0; --I) {
      auto *TypeInfo =
          dyn_cast<GlobalValue>(CPI->getArgOperand(I - 1)->stripPointerCasts());
      LP.TypeIds.push_back(getTypeIDFor(TypeInfo));
    }
  } else {
    assert(isa<CleanupPadInst>(FirstI) && "Invalid landingpad!");
  }

  return LandingPadLabel;
}

void MachineFunction::setCallSiteLandingPad(MCSymbol *Sym,
                                            ArrayRef<unsigned> Sites) {
  LPadToCallSiteMap[Sym].append(Sites.begin(), Sites.end());
}
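
/// Type IDs handed out here are 1-based; ID 0 is reserved for the cleanup
/// action (see addLandingPad above).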
unsigned MachineFunction::getTypeIDFor(const GlobalValue *TI) {
  for (unsigned i = 0, N = TypeInfos.size(); i != N; ++i)
    if (TypeInfos[i] == TI) return i + 1;

  TypeInfos.push_back(TI);
  return TypeInfos.size();
}

int MachineFunction::getFilterIDFor(ArrayRef<unsigned> TyIds) {
  // If the new filter coincides with the tail of an existing filter, then
  // re-use the existing filter. Folding filters more than this requires
  // re-ordering filters and/or their elements - probably not worth it.
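  // For example, if FilterIds holds [5, 6, 0] (the filter [5,6], ID -1), a new
  // filter [6] coincides with the tail beginning at index 1 and is assigned
  // ID -(1 + 1) = -2.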
  for (unsigned i : FilterEnds) {
    unsigned j = TyIds.size();

    while (i && j)
      if (FilterIds[--i] != TyIds[--j])
        goto try_next;

    if (!j)
      // The new filter coincides with range [i, end) of the existing filter.
      return -(1 + i);

try_next:;
  }

  // Add the new filter.
  int FilterID = -(1 + FilterIds.size());
  FilterIds.reserve(FilterIds.size() + TyIds.size() + 1);
  llvm::append_range(FilterIds, TyIds);
  FilterEnds.push_back(FilterIds.size());
  FilterIds.push_back(0); // terminator
  return FilterID;
}

MachineFunction::CallSiteInfoMap::iterator
MachineFunction::getCallSiteInfo(const MachineInstr *MI) {
  assert(MI->isCandidateForCallSiteEntry() &&
         "Call site info refers only to call (MI) candidates");

  if (!Target.Options.EmitCallSiteInfo)
    return CallSitesInfo.end();
  return CallSitesInfo.find(MI);
}

/// Return the call machine instruction or find a call within bundle.
static const MachineInstr *getCallInstr(const MachineInstr *MI) {
  if (!MI->isBundle())
    return MI;

  for (const auto &BMI : make_range(getBundleStart(MI->getIterator()),
                                    getBundleEnd(MI->getIterator())))
    if (BMI.isCandidateForCallSiteEntry())
      return &BMI;

  llvm_unreachable("Unexpected bundle without a call site candidate");
}

void MachineFunction::eraseCallSiteInfo(const MachineInstr *MI) {
  assert(MI->shouldUpdateCallSiteInfo() &&
         "Call site info refers only to call (MI) candidates or "
         "candidates inside bundles");

  const MachineInstr *CallMI = getCallInstr(MI);
  CallSiteInfoMap::iterator CSIt = getCallSiteInfo(CallMI);
  if (CSIt == CallSitesInfo.end())
    return;
  CallSitesInfo.erase(CSIt);
}

void MachineFunction::copyCallSiteInfo(const MachineInstr *Old,
                                       const MachineInstr *New) {
  assert(Old->shouldUpdateCallSiteInfo() &&
         "Call site info refers only to call (MI) candidates or "
         "candidates inside bundles");

  if (!New->isCandidateForCallSiteEntry())
    return eraseCallSiteInfo(Old);

  const MachineInstr *OldCallMI = getCallInstr(Old);
  CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
  if (CSIt == CallSitesInfo.end())
    return;

  CallSiteInfo CSInfo = CSIt->second;
  CallSitesInfo[New] = CSInfo;
}

void MachineFunction::moveCallSiteInfo(const MachineInstr *Old,
                                       const MachineInstr *New) {
  assert(Old->shouldUpdateCallSiteInfo() &&
         "Call site info refers only to call (MI) candidates or "
         "candidates inside bundles");

  if (!New->isCandidateForCallSiteEntry())
    return eraseCallSiteInfo(Old);

  const MachineInstr *OldCallMI = getCallInstr(Old);
  CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
  if (CSIt == CallSitesInfo.end())
    return;

  CallSiteInfo CSInfo = std::move(CSIt->second);
  CallSitesInfo.erase(CSIt);
  CallSitesInfo[New] = CSInfo;
}

void MachineFunction::setDebugInstrNumberingCount(unsigned Num) {
  DebugInstrNumberingCount = Num;
}

void MachineFunction::makeDebugValueSubstitution(DebugInstrOperandPair A,
                                                 DebugInstrOperandPair B,
                                                 unsigned Subreg) {
  // Catch any accidental self-loops.
  assert(A.first != B.first);
  // Don't allow any substitutions _from_ the memory operand number.
  assert(A.second != DebugOperandMemNumber);

  DebugValueSubstitutions.push_back({A, B, Subreg});
}
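
// Note: each substitution recorded by the function below redirects debug users
// of (OldInstrNum, I) to (NewInstrNum, I) for every def operand I of Old.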
void MachineFunction::substituteDebugValuesForInst(const MachineInstr &Old,
                                                   MachineInstr &New,
                                                   unsigned MaxOperand) {
  // If the Old instruction wasn't tracked at all, there is no work to do.
  unsigned OldInstrNum = Old.peekDebugInstrNum();
  if (!OldInstrNum)
    return;

  // Iterate over all operands looking for defs to create substitutions for.
  // Avoid creating new instr numbers unless we create a new substitution.
  // While this has no functional effect, it risks confusing someone reading
  // MIR output.
  // Examine all the operands, or the first N specified by the caller.
  MaxOperand = std::min(MaxOperand, Old.getNumOperands());
  for (unsigned int I = 0; I < MaxOperand; ++I) {
    const auto &OldMO = Old.getOperand(I);
    auto &NewMO = New.getOperand(I);
    (void)NewMO;

    if (!OldMO.isReg() || !OldMO.isDef())
      continue;
    assert(NewMO.isDef());

    unsigned NewInstrNum = New.getDebugInstrNum();
    makeDebugValueSubstitution(std::make_pair(OldInstrNum, I),
                               std::make_pair(NewInstrNum, I));
  }
}

auto MachineFunction::salvageCopySSA(
    MachineInstr &MI, DenseMap<Register, DebugInstrOperandPair> &DbgPHICache)
    -> DebugInstrOperandPair {
  const TargetInstrInfo &TII = *getSubtarget().getInstrInfo();

  // Check whether this copy-like instruction has already been salvaged into
  // an operand pair.
  Register Dest;
  if (auto CopyDstSrc = TII.isCopyInstr(MI)) {
    Dest = CopyDstSrc->Destination->getReg();
  } else {
    assert(MI.isSubregToReg());
    Dest = MI.getOperand(0).getReg();
  }

  auto CacheIt = DbgPHICache.find(Dest);
  if (CacheIt != DbgPHICache.end())
    return CacheIt->second;

  // Calculate the instruction number to use, or install a DBG_PHI.
  auto OperandPair = salvageCopySSAImpl(MI);
  DbgPHICache.insert({Dest, OperandPair});
  return OperandPair;
}

auto MachineFunction::salvageCopySSAImpl(MachineInstr &MI)
    -> DebugInstrOperandPair {
  MachineRegisterInfo &MRI = getRegInfo();
  const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
  const TargetInstrInfo &TII = *getSubtarget().getInstrInfo();

  // Chase the value read by a copy-like instruction back to the instruction
  // that ultimately _defines_ that value. This may pass:
  //  * Through multiple intermediate copies, including subregister moves /
  //    copies,
  //  * Copies from physical registers that must then be traced back to the
  //    defining instruction,
  //  * Or, physical registers may be live-in to (only) the entry block, which
  //    requires a DBG_PHI to be created.
  // We can pursue this problem in that order: trace back through copies,
  // optionally through a physical register, to a defining instruction. We
  // should never move from physreg to vreg. As we're still in SSA form, no need
  // to worry about partial definitions of registers.
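
  // An illustrative (hypothetical) MIR chain this walk may follow:
  //   %2:gr64 = COPY %1       ; MI: the copy being salvaged
  //   %1:gr64 = COPY $rdi     ; intermediate copy reading a physreg
  // After reaching $rdi, we walk back up the block for an instruction defining
  // it; if none exists, a DBG_PHI reading $rdi is inserted at the block start.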

  // Helper lambda to interpret a copy-like instruction. Takes instruction,
  // returns the register read and any subregister identifying which part is
  // read.
  auto GetRegAndSubreg =
      [&](const MachineInstr &Cpy) -> std::pair<Register, unsigned> {
    Register NewReg, OldReg;
    unsigned SubReg;
    if (Cpy.isCopy()) {
      OldReg = Cpy.getOperand(0).getReg();
      NewReg = Cpy.getOperand(1).getReg();
      SubReg = Cpy.getOperand(1).getSubReg();
    } else if (Cpy.isSubregToReg()) {
      OldReg = Cpy.getOperand(0).getReg();
      NewReg = Cpy.getOperand(2).getReg();
      SubReg = Cpy.getOperand(3).getImm();
    } else {
      auto CopyDetails = *TII.isCopyInstr(Cpy);
      const MachineOperand &Src = *CopyDetails.Source;
      const MachineOperand &Dest = *CopyDetails.Destination;
      OldReg = Dest.getReg();
      NewReg = Src.getReg();
      SubReg = Src.getSubReg();
    }

    return {NewReg, SubReg};
  };

  // First seek either the defining instruction, or a copy from a physreg.
  // During search, the current state is the current copy instruction, and which
  // register we've read. Accumulate qualifying subregisters into SubregsSeen;
  // deal with those later.
  auto State = GetRegAndSubreg(MI);
  auto CurInst = MI.getIterator();
  SmallVector<unsigned, 4> SubregsSeen;
  while (true) {
    // If we've found a copy from a physreg, first portion of search is over.
    if (!State.first.isVirtual())
      break;

    // Record any subregister qualifier.
    if (State.second)
      SubregsSeen.push_back(State.second);

    assert(MRI.hasOneDef(State.first));
    MachineInstr &Inst = *MRI.def_begin(State.first)->getParent();
    CurInst = Inst.getIterator();

    // Any non-copy instruction is the defining instruction we're seeking.
    if (!Inst.isCopyLike() && !TII.isCopyInstr(Inst))
      break;
    State = GetRegAndSubreg(Inst);
  }

  // Helper lambda to apply additional subregister substitutions to a known
  // instruction/operand pair. Adds new (fake) substitutions so that we can
  // record the subregister. FIXME: this isn't very space efficient if multiple
  // values are tracked back through the same copies; cache something later.
  auto ApplySubregisters =
      [&](DebugInstrOperandPair P) -> DebugInstrOperandPair {
    for (unsigned Subreg : reverse(SubregsSeen)) {
      // Fetch a new instruction number, not attached to an actual instruction.
      unsigned NewInstrNumber = getNewDebugInstrNum();
      // Add a substitution from the "new" number to the known one, with a
      // qualifying subreg.
      makeDebugValueSubstitution({NewInstrNumber, 0}, P, Subreg);
      // Return the new number; to find the underlying value, consumers need to
      // deal with the qualifying subreg.
      P = {NewInstrNumber, 0};
    }
    return P;
  };

  // If we managed to find the defining instruction after COPYs, return an
  // instruction / operand pair after adding subregister qualifiers.
  if (State.first.isVirtual()) {
    // Virtual register def -- we can just look up where this happens.
    MachineInstr *Inst = MRI.def_begin(State.first)->getParent();
    for (auto &MO : Inst->all_defs()) {
      if (MO.getReg() != State.first)
        continue;
      return ApplySubregisters({Inst->getDebugInstrNum(), MO.getOperandNo()});
    }

    llvm_unreachable("Vreg def with no corresponding operand?");
  }

  // Our search ended in a copy from a physreg: walk back up the function
  // looking for whatever defines the physreg.
  assert(CurInst->isCopyLike() || TII.isCopyInstr(*CurInst));
  State = GetRegAndSubreg(*CurInst);
  Register RegToSeek = State.first;

  auto RMII = CurInst->getReverseIterator();
  auto PrevInstrs = make_range(RMII, CurInst->getParent()->instr_rend());
  for (auto &ToExamine : PrevInstrs) {
    for (auto &MO : ToExamine.all_defs()) {
      // Test for operand that defines something aliasing RegToSeek.
      if (!TRI.regsOverlap(RegToSeek, MO.getReg()))
        continue;

      return ApplySubregisters(
          {ToExamine.getDebugInstrNum(), MO.getOperandNo()});
    }
  }

  MachineBasicBlock &InsertBB = *CurInst->getParent();

  // We reached the start of the block before finding a defining instruction.
  // There are numerous scenarios where this can happen:
  // * Constant physical registers,
  // * Several intrinsics that allow LLVM-IR to read arbitrary registers,
  // * Arguments in the entry block,
  // * Exception handling landing pads.
  // Validating all of them is too difficult, so just insert a DBG_PHI reading
  // the variable value at this position, rather than checking it makes sense.

  // Create DBG_PHI for specified physreg.
  auto Builder = BuildMI(InsertBB, InsertBB.getFirstNonPHI(), DebugLoc(),
                         TII.get(TargetOpcode::DBG_PHI));
  Builder.addReg(State.first);
  unsigned NewNum = getNewDebugInstrNum();
  Builder.addImm(NewNum);
  return ApplySubregisters({NewNum, 0u});
}

void MachineFunction::finalizeDebugInstrRefs() {
  auto *TII = getSubtarget().getInstrInfo();

  auto MakeUndefDbgValue = [&](MachineInstr &MI) {
    const MCInstrDesc &RefII = TII->get(TargetOpcode::DBG_VALUE_LIST);
    MI.setDesc(RefII);
    MI.setDebugValueUndef();
  };

  DenseMap<Register, DebugInstrOperandPair> ArgDbgPHIs;
  for (auto &MBB : *this) {
    for (auto &MI : MBB) {
      if (!MI.isDebugRef())
        continue;

      bool IsValidRef = true;

      for (MachineOperand &MO : MI.debug_operands()) {
        if (!MO.isReg())
          continue;

        Register Reg = MO.getReg();

        // Some vregs can be deleted as redundant in the meantime. Mark those
        // as DBG_VALUE $noreg. Additionally, some normal instructions are
        // quickly deleted, leaving dangling references to vregs with no def.
        if (Reg == 0 || !RegInfo->hasOneDef(Reg)) {
          IsValidRef = false;
          break;
        }

        assert(Reg.isVirtual());
        MachineInstr &DefMI = *RegInfo->def_instr_begin(Reg);

        // If we've found a copy-like instruction, follow it back to the
        // instruction that defines the source value, see salvageCopySSA docs
        // for why this is important.
        if (DefMI.isCopyLike() || TII->isCopyInstr(DefMI)) {
          auto Result = salvageCopySSA(DefMI, ArgDbgPHIs);
          MO.ChangeToDbgInstrRef(Result.first, Result.second);
        } else {
          // Otherwise, identify the operand number that the VReg refers to.
          unsigned OperandIdx = 0;
          for (const auto &DefMO : DefMI.operands()) {
            if (DefMO.isReg() && DefMO.isDef() && DefMO.getReg() == Reg)
              break;
            ++OperandIdx;
          }
          assert(OperandIdx < DefMI.getNumOperands());

          // Morph this instr ref to point at the given instruction and operand.
          unsigned ID = DefMI.getDebugInstrNum();
          MO.ChangeToDbgInstrRef(ID, OperandIdx);
        }
      }

      if (!IsValidRef)
        MakeUndefDbgValue(MI);
    }
  }
}

bool MachineFunction::shouldUseDebugInstrRef() const {
  // Disable instr-ref at -O0: it's very slow (in compile time). We can still
  // have optimized code inlined into this unoptimized code, however with
  // fewer and less aggressive optimizations happening, coverage and accuracy
  // should not suffer.
  if (getTarget().getOptLevel() == CodeGenOptLevel::None)
    return false;

  // Don't use instr-ref if this function is marked optnone.
  if (F.hasFnAttribute(Attribute::OptimizeNone))
    return false;

  if (llvm::debuginfoShouldUseDebugInstrRef(getTarget().getTargetTriple()))
    return true;

  return false;
}

bool MachineFunction::useDebugInstrRef() const {
  return UseDebugInstrRef;
}

void MachineFunction::setUseDebugInstrRef(bool Use) {
  UseDebugInstrRef = Use;
}

// Use one million as a high / reserved number.
const unsigned MachineFunction::DebugOperandMemNumber = 1000000;

/// \}

//===----------------------------------------------------------------------===//
// MachineJumpTableInfo implementation
//===----------------------------------------------------------------------===//

/// Return the size of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
  // The size of a jump table entry is 4 bytes unless the entry is just the
  // address of a block, in which case it is the pointer size.
  switch (getEntryKind()) {
  case MachineJumpTableInfo::EK_BlockAddress:
    return TD.getPointerSize();
  case MachineJumpTableInfo::EK_GPRel64BlockAddress:
  case MachineJumpTableInfo::EK_LabelDifference64:
    return 8;
  case MachineJumpTableInfo::EK_GPRel32BlockAddress:
  case MachineJumpTableInfo::EK_LabelDifference32:
  case MachineJumpTableInfo::EK_Custom32:
    return 4;
  case MachineJumpTableInfo::EK_Inline:
    return 0;
  }
  llvm_unreachable("Unknown jump table encoding!");
}

/// Return the alignment of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
  // The alignment of a jump table entry is the alignment of int32 unless the
  // entry is just the address of a block, in which case it is the pointer
  // alignment.
  switch (getEntryKind()) {
  case MachineJumpTableInfo::EK_BlockAddress:
    return TD.getPointerABIAlignment(0).value();
  case MachineJumpTableInfo::EK_GPRel64BlockAddress:
  case MachineJumpTableInfo::EK_LabelDifference64:
    return TD.getABIIntegerTypeAlignment(64).value();
  case MachineJumpTableInfo::EK_GPRel32BlockAddress:
  case MachineJumpTableInfo::EK_LabelDifference32:
  case MachineJumpTableInfo::EK_Custom32:
    return TD.getABIIntegerTypeAlignment(32).value();
  case MachineJumpTableInfo::EK_Inline:
    return 1;
  }
  llvm_unreachable("Unknown jump table encoding!");
}

/// Create a new jump table entry in the jump table info.
unsigned MachineJumpTableInfo::createJumpTableIndex(
    const std::vector<MachineBasicBlock*> &DestBBs) {
  assert(!DestBBs.empty() && "Cannot create an empty jump table!");
  JumpTables.push_back(MachineJumpTableEntry(DestBBs));
  return JumpTables.size()-1;
}

/// If Old is the target of any jump tables, update the jump tables to branch
/// to New instead.
bool MachineJumpTableInfo::ReplaceMBBInJumpTables(MachineBasicBlock *Old,
                                                  MachineBasicBlock *New) {
  assert(Old != New && "Not making a change?");
  bool MadeChange = false;
  for (size_t i = 0, e = JumpTables.size(); i != e; ++i)
    MadeChange |= ReplaceMBBInJumpTable(i, Old, New);
  return MadeChange;
}

/// If MBB is present in any jump tables, remove it.
bool MachineJumpTableInfo::RemoveMBBFromJumpTables(MachineBasicBlock *MBB) {
  bool MadeChange = false;
  for (MachineJumpTableEntry &JTE : JumpTables) {
    auto removeBeginItr = std::remove(JTE.MBBs.begin(), JTE.MBBs.end(), MBB);
    MadeChange |= (removeBeginItr != JTE.MBBs.end());
    JTE.MBBs.erase(removeBeginItr, JTE.MBBs.end());
  }
  return MadeChange;
}

/// If Old is a target of the jump tables, update the jump table to branch to
/// New instead.
bool MachineJumpTableInfo::ReplaceMBBInJumpTable(unsigned Idx,
                                                 MachineBasicBlock *Old,
                                                 MachineBasicBlock *New) {
  assert(Old != New && "Not making a change?");
  bool MadeChange = false;
  MachineJumpTableEntry &JTE = JumpTables[Idx];
  for (MachineBasicBlock *&MBB : JTE.MBBs)
    if (MBB == Old) {
      MBB = New;
      MadeChange = true;
    }
  return MadeChange;
}

void MachineJumpTableInfo::print(raw_ostream &OS) const {
  if (JumpTables.empty()) return;

  OS << "Jump Tables:\n";

  for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
    OS << printJumpTableEntryReference(i) << ':';
    for (const MachineBasicBlock *MBB : JumpTables[i].MBBs)
      OS << ' ' << printMBBReference(*MBB);
    if (i != e - 1)
      OS << '\n';
  }

  OS << '\n';
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MachineJumpTableInfo::dump() const { print(dbgs()); }
#endif

Printable llvm::printJumpTableEntryReference(unsigned Idx) {
  return Printable([Idx](raw_ostream &OS) { OS << "%jump-table." << Idx; });
}

//===----------------------------------------------------------------------===//
// MachineConstantPool implementation
//===----------------------------------------------------------------------===//

void MachineConstantPoolValue::anchor() {}

unsigned MachineConstantPoolValue::getSizeInBytes(const DataLayout &DL) const {
  return DL.getTypeAllocSize(Ty);
}

unsigned MachineConstantPoolEntry::getSizeInBytes(const DataLayout &DL) const {
  if (isMachineConstantPoolEntry())
    return Val.MachineCPVal->getSizeInBytes(DL);
  return DL.getTypeAllocSize(Val.ConstVal->getType());
}

bool MachineConstantPoolEntry::needsRelocation() const {
  if (isMachineConstantPoolEntry())
    return true;
  return Val.ConstVal->needsDynamicRelocation();
}

SectionKind
MachineConstantPoolEntry::getSectionKind(const DataLayout *DL) const {
  if (needsRelocation())
    return SectionKind::getReadOnlyWithRel();
  switch (getSizeInBytes(*DL)) {
  case 4:
    return SectionKind::getMergeableConst4();
  case 8:
    return SectionKind::getMergeableConst8();
  case 16:
    return SectionKind::getMergeableConst16();
  case 32:
    return SectionKind::getMergeableConst32();
  default:
    return SectionKind::getReadOnly();
  }
}

MachineConstantPool::~MachineConstantPool() {
  // A constant may be a member of both Constants and MachineCPVsSharingEntries,
  // so keep track of which we've deleted to avoid double deletions.
  DenseSet<MachineConstantPoolValue*> Deleted;
  for (const MachineConstantPoolEntry &C : Constants)
    if (C.isMachineConstantPoolEntry()) {
      Deleted.insert(C.Val.MachineCPVal);
      delete C.Val.MachineCPVal;
    }
  for (MachineConstantPoolValue *CPV : MachineCPVsSharingEntries) {
    if (Deleted.count(CPV) == 0)
      delete CPV;
  }
}

/// Test whether the given two constants can be allocated the same constant pool
/// entry referenced by \param A.
static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
                                      const DataLayout &DL) {
  // Handle the trivial case quickly.
  if (A == B) return true;

  // If they have the same type but weren't the same constant, quickly
  // reject them.
  if (A->getType() == B->getType()) return false;

  // We can't handle structs or arrays.
  if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
      isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))
    return false;

  // For now, only support constants with the same size.
  uint64_t StoreSize = DL.getTypeStoreSize(A->getType());
  if (StoreSize != DL.getTypeStoreSize(B->getType()) || StoreSize > 128)
    return false;

  bool ContainsUndefOrPoisonA = A->containsUndefOrPoisonElement();

  Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);

  // Try constant folding a bitcast of both instructions to an integer. If we
  // get two identical ConstantInt's, then we are good to share them. We use
  // the constant folding APIs to do this so that we get the benefit of
  // DataLayout.
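  // For example, float 1.0 and i32 0x3f800000 have the same 32-bit pattern, so
  // both fold to the same ConstantInt and may share one 4-byte entry.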
  if (isa<PointerType>(A->getType()))
    A = ConstantFoldCastOperand(Instruction::PtrToInt,
                                const_cast<Constant *>(A), IntTy, DL);
  else if (A->getType() != IntTy)
    A = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(A),
                                IntTy, DL);
  if (isa<PointerType>(B->getType()))
    B = ConstantFoldCastOperand(Instruction::PtrToInt,
                                const_cast<Constant *>(B), IntTy, DL);
  else if (B->getType() != IntTy)
    B = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(B),
                                IntTy, DL);

  if (A != B)
    return false;

  // Constants only safely match if A doesn't contain undef/poison.
  // As we'll be reusing A, it doesn't matter if B contains undef/poison.
  // TODO: Handle cases where A and B have the same undef/poison elements.
  // TODO: Merge A and B with mismatching undef/poison elements.
  return !ContainsUndefOrPoisonA;
}

/// Create a new entry in the constant pool or return an existing one.
/// User must specify the minimum required alignment for the object.
unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
                                                   Align Alignment) {
  if (Alignment > PoolAlignment) PoolAlignment = Alignment;

  // Check to see if we already have this constant.
  //
  // FIXME, this could be made much more efficient for large constant pools.
  for (unsigned i = 0, e = Constants.size(); i != e; ++i)
    if (!Constants[i].isMachineConstantPoolEntry() &&
        CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, DL)) {
      if (Constants[i].getAlign() < Alignment)
        Constants[i].Alignment = Alignment;
      return i;
    }

  Constants.push_back(MachineConstantPoolEntry(C, Alignment));
  return Constants.size()-1;
}

unsigned MachineConstantPool::getConstantPoolIndex(MachineConstantPoolValue *V,
                                                   Align Alignment) {
  if (Alignment > PoolAlignment) PoolAlignment = Alignment;

  // Check to see if we already have this constant.
  //
  // FIXME, this could be made much more efficient for large constant pools.
  int Idx = V->getExistingMachineCPValue(this, Alignment);
  if (Idx != -1) {
    MachineCPVsSharingEntries.insert(V);
    return (unsigned)Idx;
  }

  Constants.push_back(MachineConstantPoolEntry(V, Alignment));
  return Constants.size()-1;
}

void MachineConstantPool::print(raw_ostream &OS) const {
  if (Constants.empty()) return;

  OS << "Constant Pool:\n";
  for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
    OS << "  cp#" << i << ": ";
    if (Constants[i].isMachineConstantPoolEntry())
      Constants[i].Val.MachineCPVal->print(OS);
    else
      Constants[i].Val.ConstVal->printAsOperand(OS, /*PrintType=*/false);
    OS << ", align=" << Constants[i].getAlign().value();
    OS << "\n";
  }
}

//===----------------------------------------------------------------------===//
// Template specialization for MachineFunction implementation of
// ProfileSummaryInfo::getEntryCount().
//===----------------------------------------------------------------------===//
template <>
std::optional<Function::ProfileCount>
ProfileSummaryInfo::getEntryCount<llvm::MachineFunction>(
    const llvm::MachineFunction *F) const {
  return F->getFunction().getEntryCount();
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MachineConstantPool::dump() const { print(dbgs()); }
#endif