1 //===- MachineFunction.cpp ------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Collect native machine code information for a function. This allows
10 // target-specific information about the generated code to be stored with each
11 // function.
13 //===----------------------------------------------------------------------===//
15 #include "llvm/CodeGen/MachineFunction.h"
16 #include "llvm/ADT/BitVector.h"
17 #include "llvm/ADT/DenseMap.h"
18 #include "llvm/ADT/DenseSet.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallString.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/StringRef.h"
23 #include "llvm/ADT/Twine.h"
24 #include "llvm/Analysis/ConstantFolding.h"
25 #include "llvm/CodeGen/MachineBasicBlock.h"
26 #include "llvm/CodeGen/MachineConstantPool.h"
27 #include "llvm/CodeGen/MachineFrameInfo.h"
28 #include "llvm/CodeGen/MachineInstr.h"
29 #include "llvm/CodeGen/MachineJumpTableInfo.h"
30 #include "llvm/CodeGen/MachineMemOperand.h"
31 #include "llvm/CodeGen/MachineModuleInfo.h"
32 #include "llvm/CodeGen/MachineRegisterInfo.h"
33 #include "llvm/CodeGen/PseudoSourceValue.h"
34 #include "llvm/CodeGen/TargetFrameLowering.h"
35 #include "llvm/CodeGen/TargetInstrInfo.h"
36 #include "llvm/CodeGen/TargetLowering.h"
37 #include "llvm/CodeGen/TargetRegisterInfo.h"
38 #include "llvm/CodeGen/TargetSubtargetInfo.h"
39 #include "llvm/CodeGen/WasmEHFuncInfo.h"
40 #include "llvm/CodeGen/WinEHFuncInfo.h"
41 #include "llvm/Config/llvm-config.h"
42 #include "llvm/IR/Attributes.h"
43 #include "llvm/IR/BasicBlock.h"
44 #include "llvm/IR/Constant.h"
45 #include "llvm/IR/DataLayout.h"
46 #include "llvm/IR/DerivedTypes.h"
47 #include "llvm/IR/EHPersonalities.h"
48 #include "llvm/IR/Function.h"
49 #include "llvm/IR/GlobalValue.h"
50 #include "llvm/IR/Instruction.h"
51 #include "llvm/IR/Instructions.h"
52 #include "llvm/IR/Metadata.h"
53 #include "llvm/IR/Module.h"
54 #include "llvm/IR/ModuleSlotTracker.h"
55 #include "llvm/IR/Value.h"
56 #include "llvm/MC/MCContext.h"
57 #include "llvm/MC/MCSymbol.h"
58 #include "llvm/MC/SectionKind.h"
59 #include "llvm/Support/Casting.h"
60 #include "llvm/Support/CommandLine.h"
61 #include "llvm/Support/Compiler.h"
62 #include "llvm/Support/DOTGraphTraits.h"
63 #include "llvm/Support/ErrorHandling.h"
64 #include "llvm/Support/GraphWriter.h"
65 #include "llvm/Support/raw_ostream.h"
66 #include "llvm/Target/TargetMachine.h"
67 #include <algorithm>
68 #include <cassert>
69 #include <cstddef>
70 #include <cstdint>
71 #include <iterator>
72 #include <string>
73 #include <type_traits>
74 #include <utility>
75 #include <vector>
77 #include "LiveDebugValues/LiveDebugValues.h"
79 using namespace llvm;
81 #define DEBUG_TYPE "codegen"
83 static cl::opt<unsigned> AlignAllFunctions(
84 "align-all-functions",
85 cl::desc("Force the alignment of all functions in log2 format (e.g. 4 "
86 "means align on 16B boundaries)."),
87 cl::init(0), cl::Hidden);
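// Illustrative usage (assuming a tool that links in CodeGen, e.g. llc):
//   llc -align-all-functions=5 foo.ll   # forces 32-byte (2^5) alignment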
89 static const char *getPropertyName(MachineFunctionProperties::Property Prop) {
90 using P = MachineFunctionProperties::Property;
92 // clang-format off
93 switch(Prop) {
94 case P::FailedISel: return "FailedISel";
95 case P::IsSSA: return "IsSSA";
96 case P::Legalized: return "Legalized";
97 case P::NoPHIs: return "NoPHIs";
98 case P::NoVRegs: return "NoVRegs";
99 case P::RegBankSelected: return "RegBankSelected";
100 case P::Selected: return "Selected";
101 case P::TracksLiveness: return "TracksLiveness";
102 case P::TiedOpsRewritten: return "TiedOpsRewritten";
103 case P::FailsVerification: return "FailsVerification";
104 case P::TracksDebugUserValues: return "TracksDebugUserValues";
106 // clang-format on
107 llvm_unreachable("Invalid machine function property");
110 void setUnsafeStackSize(const Function &F, MachineFrameInfo &FrameInfo) {
111 if (!F.hasFnAttribute(Attribute::SafeStack))
112 return;
114 auto *Existing =
115 dyn_cast_or_null<MDTuple>(F.getMetadata(LLVMContext::MD_annotation));
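// The annotation is expected to be a two-operand tuple, roughly of the form
//   !annotation !{!"unsafe-stack-size", i64 144}
// (integer width and value are illustrative): an MDString naming the key,
// followed by a constant holding the unsafe stack size.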
117 if (!Existing || Existing->getNumOperands() != 2)
118 return;
120 auto *MetadataName = "unsafe-stack-size";
121 if (auto &N = Existing->getOperand(0)) {
122 if (cast<MDString>(N.get())->getString() == MetadataName) {
123 if (auto &Op = Existing->getOperand(1)) {
124 auto Val = mdconst::extract<ConstantInt>(Op)->getZExtValue();
125 FrameInfo.setUnsafeStackSize(Val);
131 // Pin the vtable to this file.
132 void MachineFunction::Delegate::anchor() {}
134 void MachineFunctionProperties::print(raw_ostream &OS) const {
135 const char *Separator = "";
136 for (BitVector::size_type I = 0; I < Properties.size(); ++I) {
137 if (!Properties[I])
138 continue;
139 OS << Separator << getPropertyName(static_cast<Property>(I));
140 Separator = ", ";
144 //===----------------------------------------------------------------------===//
145 // MachineFunction implementation
146 //===----------------------------------------------------------------------===//
148 // Out-of-line virtual method.
149 MachineFunctionInfo::~MachineFunctionInfo() = default;
151 void ilist_alloc_traits<MachineBasicBlock>::deleteNode(MachineBasicBlock *MBB) {
152 MBB->getParent()->deleteMachineBasicBlock(MBB);
155 static inline Align getFnStackAlignment(const TargetSubtargetInfo *STI,
156 const Function &F) {
157 if (auto MA = F.getFnStackAlign())
158 return *MA;
159 return STI->getFrameLowering()->getStackAlign();
162 MachineFunction::MachineFunction(Function &F, const LLVMTargetMachine &Target,
163 const TargetSubtargetInfo &STI,
164 unsigned FunctionNum, MachineModuleInfo &mmi)
165 : F(F), Target(Target), STI(&STI), Ctx(mmi.getContext()), MMI(mmi) {
166 FunctionNumber = FunctionNum;
167 init();
170 void MachineFunction::handleInsertion(MachineInstr &MI) {
171 if (TheDelegate)
172 TheDelegate->MF_HandleInsertion(MI);
175 void MachineFunction::handleRemoval(MachineInstr &MI) {
176 if (TheDelegate)
177 TheDelegate->MF_HandleRemoval(MI);
180 void MachineFunction::init() {
181 // Assume the function starts in SSA form with correct liveness.
182 Properties.set(MachineFunctionProperties::Property::IsSSA);
183 Properties.set(MachineFunctionProperties::Property::TracksLiveness);
184 if (STI->getRegisterInfo())
185 RegInfo = new (Allocator) MachineRegisterInfo(this);
186 else
187 RegInfo = nullptr;
189 MFInfo = nullptr;
191 // We can realign the stack if the target supports it and the user hasn't
192 // explicitly asked us not to.
193 bool CanRealignSP = STI->getFrameLowering()->isStackRealignable() &&
194 !F.hasFnAttribute("no-realign-stack");
195 FrameInfo = new (Allocator) MachineFrameInfo(
196 getFnStackAlignment(STI, F), /*StackRealignable=*/CanRealignSP,
197 /*ForcedRealign=*/CanRealignSP &&
198 F.hasFnAttribute(Attribute::StackAlignment));
200 setUnsafeStackSize(F, *FrameInfo);
202 if (F.hasFnAttribute(Attribute::StackAlignment))
203 FrameInfo->ensureMaxAlignment(*F.getFnStackAlign());
205 ConstantPool = new (Allocator) MachineConstantPool(getDataLayout());
206 Alignment = STI->getTargetLowering()->getMinFunctionAlignment();
208 // FIXME: Shouldn't use pref alignment if explicit alignment is set on F.
209 // FIXME: Use Function::hasOptSize().
210 if (!F.hasFnAttribute(Attribute::OptimizeForSize))
211 Alignment = std::max(Alignment,
212 STI->getTargetLowering()->getPrefFunctionAlignment());
214 if (AlignAllFunctions)
215 Alignment = Align(1ULL << AlignAllFunctions);
217 JumpTableInfo = nullptr;
219 if (isFuncletEHPersonality(classifyEHPersonality(
220 F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
221 WinEHInfo = new (Allocator) WinEHFuncInfo();
224 if (isScopedEHPersonality(classifyEHPersonality(
225 F.hasPersonalityFn() ? F.getPersonalityFn() : nullptr))) {
226 WasmEHInfo = new (Allocator) WasmEHFuncInfo();
229 assert(Target.isCompatibleDataLayout(getDataLayout()) &&
230 "Can't create a MachineFunction using a Module with a "
231 "Target-incompatible DataLayout attached\n");
233 PSVManager = std::make_unique<PseudoSourceValueManager>(getTarget());
236 void MachineFunction::initTargetMachineFunctionInfo(
237 const TargetSubtargetInfo &STI) {
238 assert(!MFInfo && "MachineFunctionInfo already set");
239 MFInfo = Target.createMachineFunctionInfo(Allocator, F, &STI);
242 MachineFunction::~MachineFunction() {
243 clear();
246 void MachineFunction::clear() {
247 Properties.reset();
248 // Don't call destructors on MachineInstr and MachineOperand. All of their
249 // memory comes from the BumpPtrAllocator which is about to be purged.
251 // Do call MachineBasicBlock destructors; they contain std::vectors.
252 for (iterator I = begin(), E = end(); I != E; I = BasicBlocks.erase(I))
253 I->Insts.clearAndLeakNodesUnsafely();
254 MBBNumbering.clear();
256 InstructionRecycler.clear(Allocator);
257 OperandRecycler.clear(Allocator);
258 BasicBlockRecycler.clear(Allocator);
259 CodeViewAnnotations.clear();
260 VariableDbgInfos.clear();
261 if (RegInfo) {
262 RegInfo->~MachineRegisterInfo();
263 Allocator.Deallocate(RegInfo);
265 if (MFInfo) {
266 MFInfo->~MachineFunctionInfo();
267 Allocator.Deallocate(MFInfo);
270 FrameInfo->~MachineFrameInfo();
271 Allocator.Deallocate(FrameInfo);
273 ConstantPool->~MachineConstantPool();
274 Allocator.Deallocate(ConstantPool);
276 if (JumpTableInfo) {
277 JumpTableInfo->~MachineJumpTableInfo();
278 Allocator.Deallocate(JumpTableInfo);
281 if (WinEHInfo) {
282 WinEHInfo->~WinEHFuncInfo();
283 Allocator.Deallocate(WinEHInfo);
286 if (WasmEHInfo) {
287 WasmEHInfo->~WasmEHFuncInfo();
288 Allocator.Deallocate(WasmEHInfo);
292 const DataLayout &MachineFunction::getDataLayout() const {
293 return F.getParent()->getDataLayout();
296 /// Get the JumpTableInfo for this function.
297 /// If it does not already exist, allocate one.
298 MachineJumpTableInfo *MachineFunction::
299 getOrCreateJumpTableInfo(unsigned EntryKind) {
300 if (JumpTableInfo) return JumpTableInfo;
302 JumpTableInfo = new (Allocator)
303 MachineJumpTableInfo((MachineJumpTableInfo::JTEntryKind)EntryKind);
304 return JumpTableInfo;
307 DenormalMode MachineFunction::getDenormalMode(const fltSemantics &FPType) const {
308 return F.getDenormalMode(FPType);
311 /// Should we be emitting segmented stack code for this function?
312 bool MachineFunction::shouldSplitStack() const {
313 return getFunction().hasFnAttribute("split-stack");
316 [[nodiscard]] unsigned
317 MachineFunction::addFrameInst(const MCCFIInstruction &Inst) {
318 FrameInstructions.push_back(Inst);
319 return FrameInstructions.size() - 1;
322 /// This discards all of the MachineBasicBlock numbers and recomputes them.
323 /// This guarantees that the MBB numbers are sequential, dense, and match the
324 /// ordering of the blocks within the function. If a specific MachineBasicBlock
325 /// is specified, only that block and those after it are renumbered.
326 void MachineFunction::RenumberBlocks(MachineBasicBlock *MBB) {
327 if (empty()) { MBBNumbering.clear(); return; }
328 MachineFunction::iterator MBBI, E = end();
329 if (MBB == nullptr)
330 MBBI = begin();
331 else
332 MBBI = MBB->getIterator();
334 // Figure out the block number this should have.
335 unsigned BlockNo = 0;
336 if (MBBI != begin())
337 BlockNo = std::prev(MBBI)->getNumber() + 1;
339 for (; MBBI != E; ++MBBI, ++BlockNo) {
340 if (MBBI->getNumber() != (int)BlockNo) {
341 // Remove use of the old number.
342 if (MBBI->getNumber() != -1) {
343 assert(MBBNumbering[MBBI->getNumber()] == &*MBBI &&
344 "MBB number mismatch!");
345 MBBNumbering[MBBI->getNumber()] = nullptr;
348 // If BlockNo is already taken, set that block's number to -1.
349 if (MBBNumbering[BlockNo])
350 MBBNumbering[BlockNo]->setNumber(-1);
352 MBBNumbering[BlockNo] = &*MBBI;
353 MBBI->setNumber(BlockNo);
357 // Okay, all the blocks are renumbered. If we have compactified the block
358 // numbering, shrink MBBNumbering now.
359 assert(BlockNo <= MBBNumbering.size() && "Mismatch!");
360 MBBNumbering.resize(BlockNo);
363 /// This method iterates over the basic blocks and assigns their IsBeginSection
364 /// and IsEndSection fields. This must be called after MBB layout is finalized
365 /// and the SectionID's are assigned to MBBs.
366 void MachineFunction::assignBeginEndSections() {
367 front().setIsBeginSection();
368 auto CurrentSectionID = front().getSectionID();
369 for (auto MBBI = std::next(begin()), E = end(); MBBI != E; ++MBBI) {
370 if (MBBI->getSectionID() == CurrentSectionID)
371 continue;
372 MBBI->setIsBeginSection();
373 std::prev(MBBI)->setIsEndSection();
374 CurrentSectionID = MBBI->getSectionID();
376 back().setIsEndSection();
379 /// Allocate a new MachineInstr. Use this instead of `new MachineInstr'.
380 MachineInstr *MachineFunction::CreateMachineInstr(const MCInstrDesc &MCID,
381 DebugLoc DL,
382 bool NoImplicit) {
383 return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
384 MachineInstr(*this, MCID, std::move(DL), NoImplicit);
387 /// Create a new MachineInstr which is a copy of the 'Orig' instruction,
388 /// identical in all ways except the instruction has no parent, prev, or next.
389 MachineInstr *
390 MachineFunction::CloneMachineInstr(const MachineInstr *Orig) {
391 return new (InstructionRecycler.Allocate<MachineInstr>(Allocator))
392 MachineInstr(*this, *Orig);
395 MachineInstr &MachineFunction::cloneMachineInstrBundle(
396 MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore,
397 const MachineInstr &Orig) {
398 MachineInstr *FirstClone = nullptr;
399 MachineBasicBlock::const_instr_iterator I = Orig.getIterator();
400 while (true) {
401 MachineInstr *Cloned = CloneMachineInstr(&*I);
402 MBB.insert(InsertBefore, Cloned);
403 if (FirstClone == nullptr) {
404 FirstClone = Cloned;
405 } else {
406 Cloned->bundleWithPred();
409 if (!I->isBundledWithSucc())
410 break;
411 ++I;
413 // Copy over call site info to the cloned instruction if needed. If Orig is in
414 // a bundle, copyCallSiteInfo takes care of finding the call instruction in
415 // the bundle.
416 if (Orig.shouldUpdateCallSiteInfo())
417 copyCallSiteInfo(&Orig, FirstClone);
418 return *FirstClone;
421 /// Delete the given MachineInstr.
423 /// This function also serves as the MachineInstr destructor - the real
424 /// ~MachineInstr() destructor must be empty.
425 void MachineFunction::deleteMachineInstr(MachineInstr *MI) {
426 // Verify that the call site info is in a valid state. This assertion is most
427 // likely to fire while implementing call site info support for a new
428 // architecture; if it does, the backtrace will show where a call to
429 // updateCallSiteInfo() is missing.
430 assert((!MI->isCandidateForCallSiteEntry() ||
431 CallSitesInfo.find(MI) == CallSitesInfo.end()) &&
432 "Call site info was not updated!");
433 // Strip it for parts. The operand array and the MI object itself are
434 // independently recyclable.
435 if (MI->Operands)
436 deallocateOperandArray(MI->CapOperands, MI->Operands);
437 // Don't call ~MachineInstr() which must be trivial anyway because
438 // ~MachineFunction drops whole lists of MachineInstrs without calling their
439 // destructors.
440 InstructionRecycler.Deallocate(Allocator, MI);
443 /// Allocate a new MachineBasicBlock. Use this instead of
444 /// `new MachineBasicBlock'.
445 MachineBasicBlock *
446 MachineFunction::CreateMachineBasicBlock(const BasicBlock *bb) {
447 MachineBasicBlock *MBB =
448 new (BasicBlockRecycler.Allocate<MachineBasicBlock>(Allocator))
449 MachineBasicBlock(*this, bb);
450 // Set BBID for `-basic-block-sections=labels` and
451 // `-basic-block-sections=list` to allow robust mapping of profiles to basic
452 // blocks.
453 if (Target.getBBSectionsType() == BasicBlockSection::Labels ||
454 Target.getBBSectionsType() == BasicBlockSection::List)
455 MBB->setBBID(NextBBID++);
456 return MBB;
459 /// Delete the given MachineBasicBlock.
460 void MachineFunction::deleteMachineBasicBlock(MachineBasicBlock *MBB) {
461 assert(MBB->getParent() == this && "MBB parent mismatch!");
462 // Clean up any references to MBB in jump tables before deleting it.
463 if (JumpTableInfo)
464 JumpTableInfo->RemoveMBBFromJumpTables(MBB);
465 MBB->~MachineBasicBlock();
466 BasicBlockRecycler.Deallocate(Allocator, MBB);
469 MachineMemOperand *MachineFunction::getMachineMemOperand(
470 MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s,
471 Align base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
472 SyncScope::ID SSID, AtomicOrdering Ordering,
473 AtomicOrdering FailureOrdering) {
474 return new (Allocator)
475 MachineMemOperand(PtrInfo, f, s, base_alignment, AAInfo, Ranges,
476 SSID, Ordering, FailureOrdering);
479 MachineMemOperand *MachineFunction::getMachineMemOperand(
480 MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy,
481 Align base_alignment, const AAMDNodes &AAInfo, const MDNode *Ranges,
482 SyncScope::ID SSID, AtomicOrdering Ordering,
483 AtomicOrdering FailureOrdering) {
484 return new (Allocator)
485 MachineMemOperand(PtrInfo, f, MemTy, base_alignment, AAInfo, Ranges, SSID,
486 Ordering, FailureOrdering);
489 MachineMemOperand *MachineFunction::getMachineMemOperand(
490 const MachineMemOperand *MMO, const MachinePointerInfo &PtrInfo, uint64_t Size) {
491 return new (Allocator)
492 MachineMemOperand(PtrInfo, MMO->getFlags(), Size, MMO->getBaseAlign(),
493 AAMDNodes(), nullptr, MMO->getSyncScopeID(),
494 MMO->getSuccessOrdering(), MMO->getFailureOrdering());
497 MachineMemOperand *MachineFunction::getMachineMemOperand(
498 const MachineMemOperand *MMO, const MachinePointerInfo &PtrInfo, LLT Ty) {
499 return new (Allocator)
500 MachineMemOperand(PtrInfo, MMO->getFlags(), Ty, MMO->getBaseAlign(),
501 AAMDNodes(), nullptr, MMO->getSyncScopeID(),
502 MMO->getSuccessOrdering(), MMO->getFailureOrdering());
505 MachineMemOperand *
506 MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
507 int64_t Offset, LLT Ty) {
508 const MachinePointerInfo &PtrInfo = MMO->getPointerInfo();
510 // If there is no pointer value, the offset isn't tracked so we need to adjust
511 // the base alignment.
512 Align Alignment = PtrInfo.V.isNull()
513 ? commonAlignment(MMO->getBaseAlign(), Offset)
514 : MMO->getBaseAlign();
516 // Do not preserve ranges, since we don't necessarily know what the high bits
517 // are anymore.
518 return new (Allocator) MachineMemOperand(
519 PtrInfo.getWithOffset(Offset), MMO->getFlags(), Ty, Alignment,
520 MMO->getAAInfo(), nullptr, MMO->getSyncScopeID(),
521 MMO->getSuccessOrdering(), MMO->getFailureOrdering());
524 MachineMemOperand *
525 MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
526 const AAMDNodes &AAInfo) {
527 MachinePointerInfo MPI = MMO->getValue() ?
528 MachinePointerInfo(MMO->getValue(), MMO->getOffset()) :
529 MachinePointerInfo(MMO->getPseudoValue(), MMO->getOffset());
531 return new (Allocator) MachineMemOperand(
532 MPI, MMO->getFlags(), MMO->getSize(), MMO->getBaseAlign(), AAInfo,
533 MMO->getRanges(), MMO->getSyncScopeID(), MMO->getSuccessOrdering(),
534 MMO->getFailureOrdering());
537 MachineMemOperand *
538 MachineFunction::getMachineMemOperand(const MachineMemOperand *MMO,
539 MachineMemOperand::Flags Flags) {
540 return new (Allocator) MachineMemOperand(
541 MMO->getPointerInfo(), Flags, MMO->getSize(), MMO->getBaseAlign(),
542 MMO->getAAInfo(), MMO->getRanges(), MMO->getSyncScopeID(),
543 MMO->getSuccessOrdering(), MMO->getFailureOrdering());
546 MachineInstr::ExtraInfo *MachineFunction::createMIExtraInfo(
547 ArrayRef<MachineMemOperand *> MMOs, MCSymbol *PreInstrSymbol,
548 MCSymbol *PostInstrSymbol, MDNode *HeapAllocMarker, MDNode *PCSections,
549 uint32_t CFIType) {
550 return MachineInstr::ExtraInfo::create(Allocator, MMOs, PreInstrSymbol,
551 PostInstrSymbol, HeapAllocMarker,
552 PCSections, CFIType);
555 const char *MachineFunction::createExternalSymbolName(StringRef Name) {
556 char *Dest = Allocator.Allocate<char>(Name.size() + 1);
557 llvm::copy(Name, Dest);
558 Dest[Name.size()] = 0;
559 return Dest;
562 uint32_t *MachineFunction::allocateRegMask() {
563 unsigned NumRegs = getSubtarget().getRegisterInfo()->getNumRegs();
564 unsigned Size = MachineOperand::getRegMaskSize(NumRegs);
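// The mask holds one bit per physical register, packed into 32-bit words;
// e.g. a target with 256 registers needs 8 words.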
565 uint32_t *Mask = Allocator.Allocate<uint32_t>(Size);
566 memset(Mask, 0, Size * sizeof(Mask[0]));
567 return Mask;
570 ArrayRef<int> MachineFunction::allocateShuffleMask(ArrayRef<int> Mask) {
571 int* AllocMask = Allocator.Allocate<int>(Mask.size());
572 copy(Mask, AllocMask);
573 return {AllocMask, Mask.size()};
576 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
577 LLVM_DUMP_METHOD void MachineFunction::dump() const {
578 print(dbgs());
580 #endif
582 StringRef MachineFunction::getName() const {
583 return getFunction().getName();
586 void MachineFunction::print(raw_ostream &OS, const SlotIndexes *Indexes) const {
587 OS << "# Machine code for function " << getName() << ": ";
588 getProperties().print(OS);
589 OS << '\n';
591 // Print Frame Information
592 FrameInfo->print(*this, OS);
594 // Print JumpTable Information
595 if (JumpTableInfo)
596 JumpTableInfo->print(OS);
598 // Print Constant Pool
599 ConstantPool->print(OS);
601 const TargetRegisterInfo *TRI = getSubtarget().getRegisterInfo();
603 if (RegInfo && !RegInfo->livein_empty()) {
604 OS << "Function Live Ins: ";
605 for (MachineRegisterInfo::livein_iterator
606 I = RegInfo->livein_begin(), E = RegInfo->livein_end(); I != E; ++I) {
607 OS << printReg(I->first, TRI);
608 if (I->second)
609 OS << " in " << printReg(I->second, TRI);
610 if (std::next(I) != E)
611 OS << ", ";
613 OS << '\n';
616 ModuleSlotTracker MST(getFunction().getParent());
617 MST.incorporateFunction(getFunction());
618 for (const auto &BB : *this) {
619 OS << '\n';
620 // If we print the whole function, print it at its most verbose level.
621 BB.print(OS, MST, Indexes, /*IsStandalone=*/true);
624 OS << "\n# End machine code for function " << getName() << ".\n\n";
627 /// True if this function needs frame moves for debug or exceptions.
628 bool MachineFunction::needsFrameMoves() const {
629 return getMMI().hasDebugInfo() ||
630 getTarget().Options.ForceDwarfFrameSection ||
631 F.needsUnwindTableEntry();
634 namespace llvm {
636 template<>
637 struct DOTGraphTraits<const MachineFunction*> : public DefaultDOTGraphTraits {
638 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
640 static std::string getGraphName(const MachineFunction *F) {
641 return ("CFG for '" + F->getName() + "' function").str();
644 std::string getNodeLabel(const MachineBasicBlock *Node,
645 const MachineFunction *Graph) {
646 std::string OutStr;
648 raw_string_ostream OSS(OutStr);
650 if (isSimple()) {
651 OSS << printMBBReference(*Node);
652 if (const BasicBlock *BB = Node->getBasicBlock())
653 OSS << ": " << BB->getName();
654 } else
655 Node->print(OSS);
658 if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());
660 // Process string output to make it nicer...
661 for (unsigned i = 0; i != OutStr.length(); ++i)
662 if (OutStr[i] == '\n') { // Left justify
663 OutStr[i] = '\\';
664 OutStr.insert(OutStr.begin()+i+1, 'l');
666 return OutStr;
670 } // end namespace llvm
672 void MachineFunction::viewCFG() const
674 #ifndef NDEBUG
675 ViewGraph(this, "mf" + getName());
676 #else
677 errs() << "MachineFunction::viewCFG is only available in debug builds on "
678 << "systems with Graphviz or gv!\n";
679 #endif // NDEBUG
682 void MachineFunction::viewCFGOnly() const
684 #ifndef NDEBUG
685 ViewGraph(this, "mf" + getName(), true);
686 #else
687 errs() << "MachineFunction::viewCFGOnly is only available in debug builds on "
688 << "systems with Graphviz or gv!\n";
689 #endif // NDEBUG
692 /// Add the specified physical register as a live-in value and
693 /// create a corresponding virtual register for it.
694 Register MachineFunction::addLiveIn(MCRegister PReg,
695 const TargetRegisterClass *RC) {
696 MachineRegisterInfo &MRI = getRegInfo();
697 Register VReg = MRI.getLiveInVirtReg(PReg);
698 if (VReg) {
699 const TargetRegisterClass *VRegRC = MRI.getRegClass(VReg);
700 (void)VRegRC;
701 // A physical register can be added several times.
702 // Between two calls, the register class of the related virtual register
703 // may have been constrained to match some operation constraints.
704 // In that case, check that the current register class includes the
705 // physical register and is a sub class of the specified RC.
706 assert((VRegRC == RC || (VRegRC->contains(PReg) &&
707 RC->hasSubClassEq(VRegRC))) &&
708 "Register class mismatch!");
709 return VReg;
711 VReg = MRI.createVirtualRegister(RC);
712 MRI.addLiveIn(PReg, VReg);
713 return VReg;
716 /// Return the MCSymbol for the specified non-empty jump table.
717 /// If isLinkerPrivate is specified, an 'l' label is returned, otherwise a
718 /// normal 'L' label is returned.
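/// For example, on an ELF target (private prefix ".L") the second jump table
/// of function number 7 would be named ".LJTI7_1".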
719 MCSymbol *MachineFunction::getJTISymbol(unsigned JTI, MCContext &Ctx,
720 bool isLinkerPrivate) const {
721 const DataLayout &DL = getDataLayout();
722 assert(JumpTableInfo && "No jump tables");
723 assert(JTI < JumpTableInfo->getJumpTables().size() && "Invalid JTI!");
725 StringRef Prefix = isLinkerPrivate ? DL.getLinkerPrivateGlobalPrefix()
726 : DL.getPrivateGlobalPrefix();
727 SmallString<60> Name;
728 raw_svector_ostream(Name)
729 << Prefix << "JTI" << getFunctionNumber() << '_' << JTI;
730 return Ctx.getOrCreateSymbol(Name);
733 /// Return a function-local symbol to represent the PIC base.
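/// For example, with Darwin's 'L' private prefix and function number 3 this
/// returns the symbol "L3$pb".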
734 MCSymbol *MachineFunction::getPICBaseSymbol() const {
735 const DataLayout &DL = getDataLayout();
736 return Ctx.getOrCreateSymbol(Twine(DL.getPrivateGlobalPrefix()) +
737 Twine(getFunctionNumber()) + "$pb");
740 /// \name Exception Handling
741 /// \{
743 LandingPadInfo &
744 MachineFunction::getOrCreateLandingPadInfo(MachineBasicBlock *LandingPad) {
745 unsigned N = LandingPads.size();
746 for (unsigned i = 0; i < N; ++i) {
747 LandingPadInfo &LP = LandingPads[i];
748 if (LP.LandingPadBlock == LandingPad)
749 return LP;
752 LandingPads.push_back(LandingPadInfo(LandingPad));
753 return LandingPads[N];
756 void MachineFunction::addInvoke(MachineBasicBlock *LandingPad,
757 MCSymbol *BeginLabel, MCSymbol *EndLabel) {
758 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
759 LP.BeginLabels.push_back(BeginLabel);
760 LP.EndLabels.push_back(EndLabel);
763 MCSymbol *MachineFunction::addLandingPad(MachineBasicBlock *LandingPad) {
764 MCSymbol *LandingPadLabel = Ctx.createTempSymbol();
765 LandingPadInfo &LP = getOrCreateLandingPadInfo(LandingPad);
766 LP.LandingPadLabel = LandingPadLabel;
768 const Instruction *FirstI = LandingPad->getBasicBlock()->getFirstNonPHI();
769 if (const auto *LPI = dyn_cast<LandingPadInst>(FirstI)) {
770 // If there's no typeid list specified, then "cleanup" is implicit.
771 // Otherwise, id 0 is reserved for the cleanup action.
772 if (LPI->isCleanup() && LPI->getNumClauses() != 0)
773 LP.TypeIds.push_back(0);
775 // FIXME: New EH - Add the clauses in reverse order. This isn't 100%
776 // correct, but we need to do it this way because of how the DWARF EH
777 // emitter processes the clauses.
778 for (unsigned I = LPI->getNumClauses(); I != 0; --I) {
779 Value *Val = LPI->getClause(I - 1);
780 if (LPI->isCatch(I - 1)) {
781 LP.TypeIds.push_back(
782 getTypeIDFor(dyn_cast<GlobalValue>(Val->stripPointerCasts())));
783 } else {
784 // Add filters in a list.
785 auto *CVal = cast<Constant>(Val);
786 SmallVector<unsigned, 4> FilterList;
787 for (const Use &U : CVal->operands())
788 FilterList.push_back(
789 getTypeIDFor(cast<GlobalValue>(U->stripPointerCasts())));
791 LP.TypeIds.push_back(getFilterIDFor(FilterList));
795 } else if (const auto *CPI = dyn_cast<CatchPadInst>(FirstI)) {
796 for (unsigned I = CPI->arg_size(); I != 0; --I) {
797 auto *TypeInfo =
798 dyn_cast<GlobalValue>(CPI->getArgOperand(I - 1)->stripPointerCasts());
799 LP.TypeIds.push_back(getTypeIDFor(TypeInfo));
802 } else {
803 assert(isa<CleanupPadInst>(FirstI) && "Invalid landingpad!");
806 return LandingPadLabel;
809 void MachineFunction::setCallSiteLandingPad(MCSymbol *Sym,
810 ArrayRef<unsigned> Sites) {
811 LPadToCallSiteMap[Sym].append(Sites.begin(), Sites.end());
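/// Return a 1-based id for the given type info, creating a new entry if
/// needed; id 0 is reserved for the cleanup action (see addLandingPad above).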
814 unsigned MachineFunction::getTypeIDFor(const GlobalValue *TI) {
815 for (unsigned i = 0, N = TypeInfos.size(); i != N; ++i)
816 if (TypeInfos[i] == TI) return i + 1;
818 TypeInfos.push_back(TI);
819 return TypeInfos.size();
822 int MachineFunction::getFilterIDFor(ArrayRef<unsigned> TyIds) {
823 // If the new filter coincides with the tail of an existing filter, then
824 // re-use the existing filter. Folding filters more than this requires
825 // re-ordering filters and/or their elements - probably not worth it.
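// Illustrative example: with FilterIds = [3, 5, 0] (filter -1 covering type
// ids [3, 5]), a new filter [5] matches the existing tail starting at index 1,
// so no new storage is added and -2 is returned.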
826 for (unsigned i : FilterEnds) {
827 unsigned j = TyIds.size();
829 while (i && j)
830 if (FilterIds[--i] != TyIds[--j])
831 goto try_next;
833 if (!j)
834 // The new filter coincides with range [i, end) of the existing filter.
835 return -(1 + i);
837 try_next:;
840 // Add the new filter.
841 int FilterID = -(1 + FilterIds.size());
842 FilterIds.reserve(FilterIds.size() + TyIds.size() + 1);
843 llvm::append_range(FilterIds, TyIds);
844 FilterEnds.push_back(FilterIds.size());
845 FilterIds.push_back(0); // terminator
846 return FilterID;
849 MachineFunction::CallSiteInfoMap::iterator
850 MachineFunction::getCallSiteInfo(const MachineInstr *MI) {
851 assert(MI->isCandidateForCallSiteEntry() &&
852 "Call site info refers only to call (MI) candidates");
854 if (!Target.Options.EmitCallSiteInfo)
855 return CallSitesInfo.end();
856 return CallSitesInfo.find(MI);
860 /// Return the call machine instruction, or find the call within a bundle.
860 static const MachineInstr *getCallInstr(const MachineInstr *MI) {
861 if (!MI->isBundle())
862 return MI;
864 for (const auto &BMI : make_range(getBundleStart(MI->getIterator()),
865 getBundleEnd(MI->getIterator())))
866 if (BMI.isCandidateForCallSiteEntry())
867 return &BMI;
869 llvm_unreachable("Unexpected bundle without a call site candidate");
872 void MachineFunction::eraseCallSiteInfo(const MachineInstr *MI) {
873 assert(MI->shouldUpdateCallSiteInfo() &&
874 "Call site info refers only to call (MI) candidates or "
875 "candidates inside bundles");
877 const MachineInstr *CallMI = getCallInstr(MI);
878 CallSiteInfoMap::iterator CSIt = getCallSiteInfo(CallMI);
879 if (CSIt == CallSitesInfo.end())
880 return;
881 CallSitesInfo.erase(CSIt);
884 void MachineFunction::copyCallSiteInfo(const MachineInstr *Old,
885 const MachineInstr *New) {
886 assert(Old->shouldUpdateCallSiteInfo() &&
887 "Call site info refers only to call (MI) candidates or "
888 "candidates inside bundles");
890 if (!New->isCandidateForCallSiteEntry())
891 return eraseCallSiteInfo(Old);
893 const MachineInstr *OldCallMI = getCallInstr(Old);
894 CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
895 if (CSIt == CallSitesInfo.end())
896 return;
898 CallSiteInfo CSInfo = CSIt->second;
899 CallSitesInfo[New] = CSInfo;
902 void MachineFunction::moveCallSiteInfo(const MachineInstr *Old,
903 const MachineInstr *New) {
904 assert(Old->shouldUpdateCallSiteInfo() &&
905 "Call site info refers only to call (MI) candidates or "
906 "candidates inside bundles");
908 if (!New->isCandidateForCallSiteEntry())
909 return eraseCallSiteInfo(Old);
911 const MachineInstr *OldCallMI = getCallInstr(Old);
912 CallSiteInfoMap::iterator CSIt = getCallSiteInfo(OldCallMI);
913 if (CSIt == CallSitesInfo.end())
914 return;
916 CallSiteInfo CSInfo = std::move(CSIt->second);
917 CallSitesInfo.erase(CSIt);
918 CallSitesInfo[New] = CSInfo;
921 void MachineFunction::setDebugInstrNumberingCount(unsigned Num) {
922 DebugInstrNumberingCount = Num;
925 void MachineFunction::makeDebugValueSubstitution(DebugInstrOperandPair A,
926 DebugInstrOperandPair B,
927 unsigned Subreg) {
928 // Catch any accidental self-loops.
929 assert(A.first != B.first);
930 // Don't allow any substitutions _from_ the memory operand number.
931 assert(A.second != DebugOperandMemNumber);
933 DebugValueSubstitutions.push_back({A, B, Subreg});
936 void MachineFunction::substituteDebugValuesForInst(const MachineInstr &Old,
937 MachineInstr &New,
938 unsigned MaxOperand) {
939 // If the Old instruction wasn't tracked at all, there is no work to do.
940 unsigned OldInstrNum = Old.peekDebugInstrNum();
941 if (!OldInstrNum)
942 return;
944 // Iterate over all operands looking for defs to create substitutions for.
945 // Avoid creating new instr numbers unless we actually create a new
946 // substitution; the extra numbers would have no functional effect but risk
947 // confusing someone reading MIR output.
948 // Examine all the operands, or the first N specified by the caller.
949 MaxOperand = std::min(MaxOperand, Old.getNumOperands());
950 for (unsigned int I = 0; I < MaxOperand; ++I) {
951 const auto &OldMO = Old.getOperand(I);
952 auto &NewMO = New.getOperand(I);
953 (void)NewMO;
955 if (!OldMO.isReg() || !OldMO.isDef())
956 continue;
957 assert(NewMO.isDef());
959 unsigned NewInstrNum = New.getDebugInstrNum();
960 makeDebugValueSubstitution(std::make_pair(OldInstrNum, I),
961 std::make_pair(NewInstrNum, I));
965 auto MachineFunction::salvageCopySSA(
966 MachineInstr &MI, DenseMap<Register, DebugInstrOperandPair> &DbgPHICache)
967 -> DebugInstrOperandPair {
968 const TargetInstrInfo &TII = *getSubtarget().getInstrInfo();
970 // Check whether this copy-like instruction has already been salvaged into
971 // an operand pair.
972 Register Dest;
973 if (auto CopyDstSrc = TII.isCopyInstr(MI)) {
974 Dest = CopyDstSrc->Destination->getReg();
975 } else {
976 assert(MI.isSubregToReg());
977 Dest = MI.getOperand(0).getReg();
980 auto CacheIt = DbgPHICache.find(Dest);
981 if (CacheIt != DbgPHICache.end())
982 return CacheIt->second;
984 // Calculate the instruction number to use, or install a DBG_PHI.
985 auto OperandPair = salvageCopySSAImpl(MI);
986 DbgPHICache.insert({Dest, OperandPair});
987 return OperandPair;
990 auto MachineFunction::salvageCopySSAImpl(MachineInstr &MI)
991 -> DebugInstrOperandPair {
992 MachineRegisterInfo &MRI = getRegInfo();
993 const TargetRegisterInfo &TRI = *MRI.getTargetRegisterInfo();
994 const TargetInstrInfo &TII = *getSubtarget().getInstrInfo();
996 // Chase the value read by a copy-like instruction back to the instruction
997 // that ultimately _defines_ that value. This may pass:
998 // * Through multiple intermediate copies, including subregister moves /
999 // copies,
1000 // * Copies from physical registers that must then be traced back to the
1001 // defining instruction,
1002 // * Or, physical registers may be live-in to (only) the entry block, which
1003 // requires a DBG_PHI to be created.
1004 // We can pursue this problem in that order: trace back through copies,
1005 // optionally through a physical register, to a defining instruction. We
1006 // should never move from physreg to vreg. As we're still in SSA form, no need
1007 // to worry about partial definitions of registers.
1009 // Helper lambda to interpret a copy-like instruction. Takes instruction,
1010 // returns the register read and any subregister identifying which part is
1011 // read.
1012 auto GetRegAndSubreg =
1013 [&](const MachineInstr &Cpy) -> std::pair<Register, unsigned> {
1014 Register NewReg, OldReg;
1015 unsigned SubReg;
1016 if (Cpy.isCopy()) {
1017 OldReg = Cpy.getOperand(0).getReg();
1018 NewReg = Cpy.getOperand(1).getReg();
1019 SubReg = Cpy.getOperand(1).getSubReg();
1020 } else if (Cpy.isSubregToReg()) {
1021 OldReg = Cpy.getOperand(0).getReg();
1022 NewReg = Cpy.getOperand(2).getReg();
1023 SubReg = Cpy.getOperand(3).getImm();
1024 } else {
1025 auto CopyDetails = *TII.isCopyInstr(Cpy);
1026 const MachineOperand &Src = *CopyDetails.Source;
1027 const MachineOperand &Dest = *CopyDetails.Destination;
1028 OldReg = Dest.getReg();
1029 NewReg = Src.getReg();
1030 SubReg = Src.getSubReg();
1033 return {NewReg, SubReg};
1036 // First seek either the defining instruction, or a copy from a physreg.
1037 // During search, the current state is the current copy instruction, and which
1038 // register we've read. Accumulate qualifying subregisters into SubregsSeen;
1039 // deal with those later.
1040 auto State = GetRegAndSubreg(MI);
1041 auto CurInst = MI.getIterator();
1042 SmallVector<unsigned, 4> SubregsSeen;
1043 while (true) {
1044 // If we've found a copy from a physreg, first portion of search is over.
1045 if (!State.first.isVirtual())
1046 break;
1048 // Record any subregister qualifier.
1049 if (State.second)
1050 SubregsSeen.push_back(State.second);
1052 assert(MRI.hasOneDef(State.first));
1053 MachineInstr &Inst = *MRI.def_begin(State.first)->getParent();
1054 CurInst = Inst.getIterator();
1056 // Any non-copy instruction is the defining instruction we're seeking.
1057 if (!Inst.isCopyLike() && !TII.isCopyInstr(Inst))
1058 break;
1059 State = GetRegAndSubreg(Inst);
1062 // Helper lambda to apply additional subregister substitutions to a known
1063 // instruction/operand pair. Adds new (fake) substitutions so that we can
1064 // record the subregister. FIXME: this isn't very space efficient if multiple
1065 // values are tracked back through the same copies; cache something later.
1066 auto ApplySubregisters =
1067 [&](DebugInstrOperandPair P) -> DebugInstrOperandPair {
1068 for (unsigned Subreg : reverse(SubregsSeen)) {
1069 // Fetch a new instruction number, not attached to an actual instruction.
1070 unsigned NewInstrNumber = getNewDebugInstrNum();
1071 // Add a substitution from the "new" number to the known one, with a
1072 // qualifying subreg.
1073 makeDebugValueSubstitution({NewInstrNumber, 0}, P, Subreg);
1074 // Return the new number; to find the underlying value, consumers need to
1075 // deal with the qualifying subreg.
1076 P = {NewInstrNumber, 0};
1078 return P;
1081 // If we managed to find the defining instruction after COPYs, return an
1082 // instruction / operand pair after adding subregister qualifiers.
1083 if (State.first.isVirtual()) {
1084 // Virtual register def -- we can just look up where this happens.
1085 MachineInstr *Inst = MRI.def_begin(State.first)->getParent();
1086 for (auto &MO : Inst->operands()) {
1087 if (!MO.isReg() || !MO.isDef() || MO.getReg() != State.first)
1088 continue;
1089 return ApplySubregisters(
1090 {Inst->getDebugInstrNum(), Inst->getOperandNo(&MO)});
1093 llvm_unreachable("Vreg def with no corresponding operand?");
1096 // Our search ended in a copy from a physreg: walk back up the function
1097 // looking for whatever defines the physreg.
1098 assert(CurInst->isCopyLike() || TII.isCopyInstr(*CurInst));
1099 State = GetRegAndSubreg(*CurInst);
1100 Register RegToSeek = State.first;
1102 auto RMII = CurInst->getReverseIterator();
1103 auto PrevInstrs = make_range(RMII, CurInst->getParent()->instr_rend());
1104 for (auto &ToExamine : PrevInstrs) {
1105 for (auto &MO : ToExamine.operands()) {
1106 // Test for operand that defines something aliasing RegToSeek.
1107 if (!MO.isReg() || !MO.isDef() ||
1108 !TRI.regsOverlap(RegToSeek, MO.getReg()))
1109 continue;
1111 return ApplySubregisters(
1112 {ToExamine.getDebugInstrNum(), ToExamine.getOperandNo(&MO)});
1116 MachineBasicBlock &InsertBB = *CurInst->getParent();
1118 // We reached the start of the block before finding a defining instruction.
1119 // There are numerous scenarios where this can happen:
1120 // * Constant physical registers,
1121 // * Several intrinsics that allow LLVM-IR to read arbitrary registers,
1122 // * Arguments in the entry block,
1123 // * Exception handling landing pads.
1124 // Validating all of them is too difficult, so just insert a DBG_PHI reading
1125 // the variable value at this position, rather than checking it makes sense.
1127 // Create DBG_PHI for specified physreg.
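// In MIR this shows up as, e.g., "DBG_PHI $rdi, 7": the physical register
// being read, followed by the freshly allocated debug instruction number.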
1128 auto Builder = BuildMI(InsertBB, InsertBB.getFirstNonPHI(), DebugLoc(),
1129 TII.get(TargetOpcode::DBG_PHI));
1130 Builder.addReg(State.first);
1131 unsigned NewNum = getNewDebugInstrNum();
1132 Builder.addImm(NewNum);
1133 return ApplySubregisters({NewNum, 0u});
1136 void MachineFunction::finalizeDebugInstrRefs() {
1137 auto *TII = getSubtarget().getInstrInfo();
1139 auto MakeUndefDbgValue = [&](MachineInstr &MI) {
1140 const MCInstrDesc &RefII = TII->get(TargetOpcode::DBG_VALUE_LIST);
1141 MI.setDesc(RefII);
1142 MI.setDebugValueUndef();
1145 DenseMap<Register, DebugInstrOperandPair> ArgDbgPHIs;
1146 for (auto &MBB : *this) {
1147 for (auto &MI : MBB) {
1148 if (!MI.isDebugRef())
1149 continue;
1151 bool IsValidRef = true;
1153 for (MachineOperand &MO : MI.debug_operands()) {
1154 if (!MO.isReg())
1155 continue;
1157 Register Reg = MO.getReg();
1159 // Some vregs can be deleted as redundant in the meantime. Mark those
1160 // as DBG_VALUE $noreg. Additionally, some normal instructions are
1161 // quickly deleted, leaving dangling references to vregs with no def.
1162 if (Reg == 0 || !RegInfo->hasOneDef(Reg)) {
1163 IsValidRef = false;
1164 break;
1167 assert(Reg.isVirtual());
1168 MachineInstr &DefMI = *RegInfo->def_instr_begin(Reg);
1170 // If we've found a copy-like instruction, follow it back to the
1171 // instruction that defines the source value, see salvageCopySSA docs
1172 // for why this is important.
1173 if (DefMI.isCopyLike() || TII->isCopyInstr(DefMI)) {
1174 auto Result = salvageCopySSA(DefMI, ArgDbgPHIs);
1175 MO.ChangeToDbgInstrRef(Result.first, Result.second);
1176 } else {
1177 // Otherwise, identify the operand number that the VReg refers to.
1178 unsigned OperandIdx = 0;
1179 for (const auto &DefMO : DefMI.operands()) {
1180 if (DefMO.isReg() && DefMO.isDef() && DefMO.getReg() == Reg)
1181 break;
1182 ++OperandIdx;
1184 assert(OperandIdx < DefMI.getNumOperands());
1186 // Morph this instr ref to point at the given instruction and operand.
1187 unsigned ID = DefMI.getDebugInstrNum();
1188 MO.ChangeToDbgInstrRef(ID, OperandIdx);
1192 if (!IsValidRef)
1193 MakeUndefDbgValue(MI);
1198 bool MachineFunction::shouldUseDebugInstrRef() const {
1199 // Disable instr-ref at -O0: it's very slow (in compile time). We can still
1200 // have optimized code inlined into this unoptimized code, however with
1201 // fewer and less aggressive optimizations happening, coverage and accuracy
1202 // should not suffer.
1203 if (getTarget().getOptLevel() == CodeGenOpt::None)
1204 return false;
1206 // Don't use instr-ref if this function is marked optnone.
1207 if (F.hasFnAttribute(Attribute::OptimizeNone))
1208 return false;
1210 if (llvm::debuginfoShouldUseDebugInstrRef(getTarget().getTargetTriple()))
1211 return true;
1213 return false;
1216 bool MachineFunction::useDebugInstrRef() const {
1217 return UseDebugInstrRef;
1220 void MachineFunction::setUseDebugInstrRef(bool Use) {
1221 UseDebugInstrRef = Use;
1224 // Use one million as a high / reserved number.
1225 const unsigned MachineFunction::DebugOperandMemNumber = 1000000;
1227 /// \}
1229 //===----------------------------------------------------------------------===//
1230 // MachineJumpTableInfo implementation
1231 //===----------------------------------------------------------------------===//
1233 /// Return the size of each entry in the jump table.
1234 unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
1235 // The size of a jump table entry is 4 bytes unless the entry is just the
1236 // address of a block, in which case it is the pointer size.
1237 switch (getEntryKind()) {
1238 case MachineJumpTableInfo::EK_BlockAddress:
1239 return TD.getPointerSize();
1240 case MachineJumpTableInfo::EK_GPRel64BlockAddress:
1241 return 8;
1242 case MachineJumpTableInfo::EK_GPRel32BlockAddress:
1243 case MachineJumpTableInfo::EK_LabelDifference32:
1244 case MachineJumpTableInfo::EK_Custom32:
1245 return 4;
1246 case MachineJumpTableInfo::EK_Inline:
1247 return 0;
1249 llvm_unreachable("Unknown jump table encoding!");
1252 /// Return the alignment of each entry in the jump table.
1253 unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
1254 // The alignment of a jump table entry is the alignment of int32 unless the
1255 // entry is just the address of a block, in which case it is the pointer
1256 // alignment.
1257 switch (getEntryKind()) {
1258 case MachineJumpTableInfo::EK_BlockAddress:
1259 return TD.getPointerABIAlignment(0).value();
1260 case MachineJumpTableInfo::EK_GPRel64BlockAddress:
1261 return TD.getABIIntegerTypeAlignment(64).value();
1262 case MachineJumpTableInfo::EK_GPRel32BlockAddress:
1263 case MachineJumpTableInfo::EK_LabelDifference32:
1264 case MachineJumpTableInfo::EK_Custom32:
1265 return TD.getABIIntegerTypeAlignment(32).value();
1266 case MachineJumpTableInfo::EK_Inline:
1267 return 1;
1269 llvm_unreachable("Unknown jump table encoding!");
1272 /// Create a new jump table entry in the jump table info.
1273 unsigned MachineJumpTableInfo::createJumpTableIndex(
1274 const std::vector<MachineBasicBlock*> &DestBBs) {
1275 assert(!DestBBs.empty() && "Cannot create an empty jump table!");
1276 JumpTables.push_back(MachineJumpTableEntry(DestBBs));
1277 return JumpTables.size()-1;
1280 /// If Old is the target of any jump tables, update the jump tables to branch
1281 /// to New instead.
1282 bool MachineJumpTableInfo::ReplaceMBBInJumpTables(MachineBasicBlock *Old,
1283 MachineBasicBlock *New) {
1284 assert(Old != New && "Not making a change?");
1285 bool MadeChange = false;
1286 for (size_t i = 0, e = JumpTables.size(); i != e; ++i)
1287 MadeChange |= ReplaceMBBInJumpTable(i, Old, New);
1288 return MadeChange;
1291 /// If MBB is present in any jump tables, remove it.
1292 bool MachineJumpTableInfo::RemoveMBBFromJumpTables(MachineBasicBlock *MBB) {
1293 bool MadeChange = false;
1294 for (MachineJumpTableEntry &JTE : JumpTables) {
1295 auto removeBeginItr = std::remove(JTE.MBBs.begin(), JTE.MBBs.end(), MBB);
1296 MadeChange |= (removeBeginItr != JTE.MBBs.end());
1297 JTE.MBBs.erase(removeBeginItr, JTE.MBBs.end());
1299 return MadeChange;
1302 /// If Old is a target of the jump tables, update the jump table to branch to
1303 /// New instead.
1304 bool MachineJumpTableInfo::ReplaceMBBInJumpTable(unsigned Idx,
1305 MachineBasicBlock *Old,
1306 MachineBasicBlock *New) {
1307 assert(Old != New && "Not making a change?");
1308 bool MadeChange = false;
1309 MachineJumpTableEntry &JTE = JumpTables[Idx];
1310 for (MachineBasicBlock *&MBB : JTE.MBBs)
1311 if (MBB == Old) {
1312 MBB = New;
1313 MadeChange = true;
1315 return MadeChange;
1318 void MachineJumpTableInfo::print(raw_ostream &OS) const {
1319 if (JumpTables.empty()) return;
1321 OS << "Jump Tables:\n";
1323 for (unsigned i = 0, e = JumpTables.size(); i != e; ++i) {
1324 OS << printJumpTableEntryReference(i) << ':';
1325 for (const MachineBasicBlock *MBB : JumpTables[i].MBBs)
1326 OS << ' ' << printMBBReference(*MBB);
1327 if (i != e - 1)
1328 OS << '\n';
1331 OS << '\n';
1334 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1335 LLVM_DUMP_METHOD void MachineJumpTableInfo::dump() const { print(dbgs()); }
1336 #endif
1338 Printable llvm::printJumpTableEntryReference(unsigned Idx) {
1339 return Printable([Idx](raw_ostream &OS) { OS << "%jump-table." << Idx; });
1342 //===----------------------------------------------------------------------===//
1343 // MachineConstantPool implementation
1344 //===----------------------------------------------------------------------===//
1346 void MachineConstantPoolValue::anchor() {}
1348 unsigned MachineConstantPoolValue::getSizeInBytes(const DataLayout &DL) const {
1349 return DL.getTypeAllocSize(Ty);
1352 unsigned MachineConstantPoolEntry::getSizeInBytes(const DataLayout &DL) const {
1353 if (isMachineConstantPoolEntry())
1354 return Val.MachineCPVal->getSizeInBytes(DL);
1355 return DL.getTypeAllocSize(Val.ConstVal->getType());
1358 bool MachineConstantPoolEntry::needsRelocation() const {
1359 if (isMachineConstantPoolEntry())
1360 return true;
1361 return Val.ConstVal->needsDynamicRelocation();
1364 SectionKind
1365 MachineConstantPoolEntry::getSectionKind(const DataLayout *DL) const {
1366 if (needsRelocation())
1367 return SectionKind::getReadOnlyWithRel();
1368 switch (getSizeInBytes(*DL)) {
1369 case 4:
1370 return SectionKind::getMergeableConst4();
1371 case 8:
1372 return SectionKind::getMergeableConst8();
1373 case 16:
1374 return SectionKind::getMergeableConst16();
1375 case 32:
1376 return SectionKind::getMergeableConst32();
1377 default:
1378 return SectionKind::getReadOnly();
1382 MachineConstantPool::~MachineConstantPool() {
1383 // A constant may be a member of both Constants and MachineCPVsSharingEntries,
1384 // so keep track of which we've deleted to avoid double deletions.
1385 DenseSet<MachineConstantPoolValue*> Deleted;
1386 for (const MachineConstantPoolEntry &C : Constants)
1387 if (C.isMachineConstantPoolEntry()) {
1388 Deleted.insert(C.Val.MachineCPVal);
1389 delete C.Val.MachineCPVal;
1391 for (MachineConstantPoolValue *CPV : MachineCPVsSharingEntries) {
1392 if (Deleted.count(CPV) == 0)
1393 delete CPV;
1397 /// Test whether the given two constants can be allocated the same constant pool
1398 /// entry.
1399 static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
1400 const DataLayout &DL) {
1401 // Handle the trivial case quickly.
1402 if (A == B) return true;
1404 // If they have the same type but weren't the same constant, quickly
1405 // reject them.
1406 if (A->getType() == B->getType()) return false;
1408 // We can't handle structs or arrays.
1409 if (isa<StructType>(A->getType()) || isa<ArrayType>(A->getType()) ||
1410 isa<StructType>(B->getType()) || isa<ArrayType>(B->getType()))
1411 return false;
1413 // For now, only support constants with the same size.
1414 uint64_t StoreSize = DL.getTypeStoreSize(A->getType());
1415 if (StoreSize != DL.getTypeStoreSize(B->getType()) || StoreSize > 128)
1416 return false;
1418 Type *IntTy = IntegerType::get(A->getContext(), StoreSize*8);
1420 // Try constant folding a bitcast of both instructions to an integer. If we
1421 // get two identical ConstantInt's, then we are good to share them. We use
1422 // the constant folding APIs to do this so that we get the benefit of
1423 // DataLayout.
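// For example, a 'float 1.0' and an 'i32 1065353216' (0x3f800000) constant of
// the same store size fold to the same integer and can share one entry.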
1424 if (isa<PointerType>(A->getType()))
1425 A = ConstantFoldCastOperand(Instruction::PtrToInt,
1426 const_cast<Constant *>(A), IntTy, DL);
1427 else if (A->getType() != IntTy)
1428 A = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(A),
1429 IntTy, DL);
1430 if (isa<PointerType>(B->getType()))
1431 B = ConstantFoldCastOperand(Instruction::PtrToInt,
1432 const_cast<Constant *>(B), IntTy, DL);
1433 else if (B->getType() != IntTy)
1434 B = ConstantFoldCastOperand(Instruction::BitCast, const_cast<Constant *>(B),
1435 IntTy, DL);
1437 return A == B;
1440 /// Create a new entry in the constant pool or return an existing one.
1441 /// User must specify the minimum required alignment for the object.
1442 unsigned MachineConstantPool::getConstantPoolIndex(const Constant *C,
1443 Align Alignment) {
1444 if (Alignment > PoolAlignment) PoolAlignment = Alignment;
1446 // Check to see if we already have this constant.
1448 // FIXME, this could be made much more efficient for large constant pools.
1449 for (unsigned i = 0, e = Constants.size(); i != e; ++i)
1450 if (!Constants[i].isMachineConstantPoolEntry() &&
1451 CanShareConstantPoolEntry(Constants[i].Val.ConstVal, C, DL)) {
1452 if (Constants[i].getAlign() < Alignment)
1453 Constants[i].Alignment = Alignment;
1454 return i;
1457 Constants.push_back(MachineConstantPoolEntry(C, Alignment));
1458 return Constants.size()-1;
1461 unsigned MachineConstantPool::getConstantPoolIndex(MachineConstantPoolValue *V,
1462 Align Alignment) {
1463 if (Alignment > PoolAlignment) PoolAlignment = Alignment;
1465 // Check to see if we already have this constant.
1467 // FIXME, this could be made much more efficient for large constant pools.
1468 int Idx = V->getExistingMachineCPValue(this, Alignment);
1469 if (Idx != -1) {
1470 MachineCPVsSharingEntries.insert(V);
1471 return (unsigned)Idx;
1474 Constants.push_back(MachineConstantPoolEntry(V, Alignment));
1475 return Constants.size()-1;
1478 void MachineConstantPool::print(raw_ostream &OS) const {
1479 if (Constants.empty()) return;
1481 OS << "Constant Pool:\n";
1482 for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
1483 OS << " cp#" << i << ": ";
1484 if (Constants[i].isMachineConstantPoolEntry())
1485 Constants[i].Val.MachineCPVal->print(OS);
1486 else
1487 Constants[i].Val.ConstVal->printAsOperand(OS, /*PrintType=*/false);
1488 OS << ", align=" << Constants[i].getAlign().value();
1489 OS << "\n";
1493 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1494 LLVM_DUMP_METHOD void MachineConstantPool::dump() const { print(dbgs()); }
1495 #endif