1 //===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Pass to verify generated machine code. The following is checked:
10 //
11 // Operand counts: All explicit operands must be present.
12 //
13 // Register classes: All physical and virtual register operands must be
14 // compatible with the register class required by the instruction descriptor.
15 //
16 // Register live intervals: Registers must be defined only once, and must be
17 // defined before use.
18 //
19 // The machine code verifier is enabled with the command-line option
20 // -verify-machineinstrs.
21 //===----------------------------------------------------------------------===//
23 #include "llvm/ADT/BitVector.h"
24 #include "llvm/ADT/DenseMap.h"
25 #include "llvm/ADT/DenseSet.h"
26 #include "llvm/ADT/DepthFirstIterator.h"
27 #include "llvm/ADT/PostOrderIterator.h"
28 #include "llvm/ADT/STLExtras.h"
29 #include "llvm/ADT/SetOperations.h"
30 #include "llvm/ADT/SmallPtrSet.h"
31 #include "llvm/ADT/SmallVector.h"
32 #include "llvm/ADT/StringRef.h"
33 #include "llvm/ADT/Twine.h"
34 #include "llvm/Analysis/EHPersonalities.h"
35 #include "llvm/CodeGen/GlobalISel/RegisterBank.h"
36 #include "llvm/CodeGen/LiveInterval.h"
37 #include "llvm/CodeGen/LiveIntervalCalc.h"
38 #include "llvm/CodeGen/LiveIntervals.h"
39 #include "llvm/CodeGen/LiveStacks.h"
40 #include "llvm/CodeGen/LiveVariables.h"
41 #include "llvm/CodeGen/MachineBasicBlock.h"
42 #include "llvm/CodeGen/MachineFrameInfo.h"
43 #include "llvm/CodeGen/MachineFunction.h"
44 #include "llvm/CodeGen/MachineFunctionPass.h"
45 #include "llvm/CodeGen/MachineInstr.h"
46 #include "llvm/CodeGen/MachineInstrBundle.h"
47 #include "llvm/CodeGen/MachineMemOperand.h"
48 #include "llvm/CodeGen/MachineOperand.h"
49 #include "llvm/CodeGen/MachineRegisterInfo.h"
50 #include "llvm/CodeGen/PseudoSourceValue.h"
51 #include "llvm/CodeGen/SlotIndexes.h"
52 #include "llvm/CodeGen/StackMaps.h"
53 #include "llvm/CodeGen/TargetInstrInfo.h"
54 #include "llvm/CodeGen/TargetOpcodes.h"
55 #include "llvm/CodeGen/TargetRegisterInfo.h"
56 #include "llvm/CodeGen/TargetSubtargetInfo.h"
57 #include "llvm/IR/BasicBlock.h"
58 #include "llvm/IR/Function.h"
59 #include "llvm/IR/InlineAsm.h"
60 #include "llvm/IR/Instructions.h"
61 #include "llvm/InitializePasses.h"
62 #include "llvm/MC/LaneBitmask.h"
63 #include "llvm/MC/MCAsmInfo.h"
64 #include "llvm/MC/MCInstrDesc.h"
65 #include "llvm/MC/MCRegisterInfo.h"
66 #include "llvm/MC/MCTargetOptions.h"
67 #include "llvm/Pass.h"
68 #include "llvm/Support/Casting.h"
69 #include "llvm/Support/ErrorHandling.h"
70 #include "llvm/Support/LowLevelTypeImpl.h"
71 #include "llvm/Support/MathExtras.h"
72 #include "llvm/Support/raw_ostream.h"
73 #include "llvm/Target/TargetMachine.h"
74 #include <algorithm>
75 #include <cassert>
76 #include <cstddef>
77 #include <cstdint>
78 #include <iterator>
79 #include <string>
80 #include <utility>
82 using namespace llvm;
84 namespace {
86 struct MachineVerifier {
87 MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {}
89 unsigned verify(const MachineFunction &MF);
91 Pass *const PASS;
92 const char *Banner;
93 const MachineFunction *MF;
94 const TargetMachine *TM;
95 const TargetInstrInfo *TII;
96 const TargetRegisterInfo *TRI;
97 const MachineRegisterInfo *MRI;
99 unsigned foundErrors;
101 // Avoid querying the MachineFunctionProperties for each operand.
102 bool isFunctionRegBankSelected;
103 bool isFunctionSelected;
104 bool isFunctionTracksDebugUserValues;
106 using RegVector = SmallVector<Register, 16>;
107 using RegMaskVector = SmallVector<const uint32_t *, 4>;
108 using RegSet = DenseSet<Register>;
109 using RegMap = DenseMap<Register, const MachineInstr *>;
110 using BlockSet = SmallPtrSet<const MachineBasicBlock *, 8>;
112 const MachineInstr *FirstNonPHI;
113 const MachineInstr *FirstTerminator;
114 BlockSet FunctionBlocks;
116 BitVector regsReserved;
117 RegSet regsLive;
118 RegVector regsDefined, regsDead, regsKilled;
119 RegMaskVector regMasks;
121 SlotIndex lastIndex;
123 // Add Reg and any sub-registers to RV
124 void addRegWithSubRegs(RegVector &RV, Register Reg) {
125 RV.push_back(Reg);
126 if (Reg.isPhysical())
127 append_range(RV, TRI->subregs(Reg.asMCReg()));
130 struct BBInfo {
131 // Is this MBB reachable from the MF entry point?
132 bool reachable = false;
134 // Vregs that must be live in because they are used without being
135 // defined. Map value is the user. vregsLiveIn doesn't include regs
136 // that only are used by PHI nodes.
137 RegMap vregsLiveIn;
139 // Regs killed in MBB. They may be defined again, and will then be in both
140 // regsKilled and regsLiveOut.
141 RegSet regsKilled;
143 // Regs defined in MBB and live out. Note that vregs passing through may
144 // be live out without being mentioned here.
145 RegSet regsLiveOut;
147 // Vregs that pass through MBB untouched. This set is disjoint from
148 // regsKilled and regsLiveOut.
149 RegSet vregsPassed;
151 // Vregs that must pass through MBB because they are needed by a successor
152 // block. This set is disjoint from regsLiveOut.
153 RegSet vregsRequired;
155 // Set versions of block's predecessor and successor lists.
156 BlockSet Preds, Succs;
158 BBInfo() = default;
160 // Add register to vregsRequired if it belongs there. Return true if
161 // anything changed.
162 bool addRequired(Register Reg) {
163 if (!Reg.isVirtual())
164 return false;
165 if (regsLiveOut.count(Reg))
166 return false;
167 return vregsRequired.insert(Reg).second;
170 // Same for a full set.
171 bool addRequired(const RegSet &RS) {
172 bool Changed = false;
173 for (Register Reg : RS)
174 Changed |= addRequired(Reg);
175 return Changed;
178 // Same for a full map.
179 bool addRequired(const RegMap &RM) {
180 bool Changed = false;
181 for (const auto &I : RM)
182 Changed |= addRequired(I.first);
183 return Changed;
186 // Live-out registers are either in regsLiveOut or vregsPassed.
187 bool isLiveOut(Register Reg) const {
188 return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
192 // Extra register info per MBB.
193 DenseMap<const MachineBasicBlock*, BBInfo> MBBInfoMap;
195 bool isReserved(Register Reg) {
196 return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
199 bool isAllocatable(Register Reg) const {
200 return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
201 !regsReserved.test(Reg.id());
204 // Analysis information if available
205 LiveVariables *LiveVars;
206 LiveIntervals *LiveInts;
207 LiveStacks *LiveStks;
208 SlotIndexes *Indexes;
210 void visitMachineFunctionBefore();
211 void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
212 void visitMachineBundleBefore(const MachineInstr *MI);
214 /// Verify that all of \p MI's virtual register operands are scalars.
215 /// \returns True if all virtual register operands are scalar. False
216 /// otherwise.
217 bool verifyAllRegOpsScalar(const MachineInstr &MI,
218 const MachineRegisterInfo &MRI);
219 bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);
220 void verifyPreISelGenericInstruction(const MachineInstr *MI);
221 void visitMachineInstrBefore(const MachineInstr *MI);
222 void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
223 void visitMachineBundleAfter(const MachineInstr *MI);
224 void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
225 void visitMachineFunctionAfter();
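// Error reporting. The first report() for a function prints the banner and
// dumps the whole function (or the LiveIntervals analysis when available);
// subsequent reports only append their specific context.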
227 void report(const char *msg, const MachineFunction *MF);
228 void report(const char *msg, const MachineBasicBlock *MBB);
229 void report(const char *msg, const MachineInstr *MI);
230 void report(const char *msg, const MachineOperand *MO, unsigned MONum,
231 LLT MOVRegType = LLT{});
232 void report(const Twine &Msg, const MachineInstr *MI);
234 void report_context(const LiveInterval &LI) const;
235 void report_context(const LiveRange &LR, Register VRegUnit,
236 LaneBitmask LaneMask) const;
237 void report_context(const LiveRange::Segment &S) const;
238 void report_context(const VNInfo &VNI) const;
239 void report_context(SlotIndex Pos) const;
240 void report_context(MCPhysReg PhysReg) const;
241 void report_context_liverange(const LiveRange &LR) const;
242 void report_context_lanemask(LaneBitmask LaneMask) const;
243 void report_context_vreg(Register VReg) const;
244 void report_context_vreg_regunit(Register VRegOrUnit) const;
246 void verifyInlineAsm(const MachineInstr *MI);
248 void checkLiveness(const MachineOperand *MO, unsigned MONum);
249 void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
250 SlotIndex UseIdx, const LiveRange &LR,
251 Register VRegOrUnit,
252 LaneBitmask LaneMask = LaneBitmask::getNone());
253 void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
254 SlotIndex DefIdx, const LiveRange &LR,
255 Register VRegOrUnit, bool SubRangeCheck = false,
256 LaneBitmask LaneMask = LaneBitmask::getNone());
258 void markReachable(const MachineBasicBlock *MBB);
259 void calcRegsPassed();
260 void checkPHIOps(const MachineBasicBlock &MBB);
262 void calcRegsRequired();
263 void verifyLiveVariables();
264 void verifyLiveIntervals();
265 void verifyLiveInterval(const LiveInterval&);
266 void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
267 LaneBitmask);
268 void verifyLiveRangeSegment(const LiveRange &,
269 const LiveRange::const_iterator I, Register,
270 LaneBitmask);
271 void verifyLiveRange(const LiveRange &, Register,
272 LaneBitmask LaneMask = LaneBitmask::getNone());
274 void verifyStackFrame();
276 void verifySlotIndexes() const;
277 void verifyProperties(const MachineFunction &MF);
280 struct MachineVerifierPass : public MachineFunctionPass {
281 static char ID; // Pass ID, replacement for typeid
283 const std::string Banner;
285 MachineVerifierPass(std::string banner = std::string())
286 : MachineFunctionPass(ID), Banner(std::move(banner)) {
287 initializeMachineVerifierPassPass(*PassRegistry::getPassRegistry());
290 void getAnalysisUsage(AnalysisUsage &AU) const override {
291 AU.setPreservesAll();
292 MachineFunctionPass::getAnalysisUsage(AU);
295 bool runOnMachineFunction(MachineFunction &MF) override {
296 // Skip functions that have known verification problems.
297 // FIXME: Remove this mechanism when all problematic passes have been
298 // fixed.
299 if (MF.getProperties().hasProperty(
300 MachineFunctionProperties::Property::FailsVerification))
301 return false;
303 unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF);
304 if (FoundErrors)
305 report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
306 return false;
310 } // end anonymous namespace
312 char MachineVerifierPass::ID = 0;
314 INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
315 "Verify generated machine code", false, false)
317 FunctionPass *llvm::createMachineVerifierPass(const std::string &Banner) {
318 return new MachineVerifierPass(Banner);
321 void llvm::verifyMachineFunction(MachineFunctionAnalysisManager *,
322 const std::string &Banner,
323 const MachineFunction &MF) {
324 // TODO: Use MFAM after porting below analyses.
325 // LiveVariables *LiveVars;
326 // LiveIntervals *LiveInts;
327 // LiveStacks *LiveStks;
328 // SlotIndexes *Indexes;
329 unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF);
330 if (FoundErrors)
331 report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
334 bool MachineFunction::verify(Pass *p, const char *Banner, bool AbortOnErrors)
335 const {
336 MachineFunction &MF = const_cast<MachineFunction&>(*this);
337 unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF);
338 if (AbortOnErrors && FoundErrors)
339 report_fatal_error("Found "+Twine(FoundErrors)+" machine code errors.");
340 return FoundErrors == 0;
343 void MachineVerifier::verifySlotIndexes() const {
344 if (Indexes == nullptr)
345 return;
347 // Ensure the IdxMBB list is sorted by slot indexes.
348 SlotIndex Last;
349 for (SlotIndexes::MBBIndexIterator I = Indexes->MBBIndexBegin(),
350 E = Indexes->MBBIndexEnd(); I != E; ++I) {
351 assert(!Last.isValid() || I->first > Last);
352 Last = I->first;
356 void MachineVerifier::verifyProperties(const MachineFunction &MF) {
357 // If a pass has introduced virtual registers without clearing the
358 // NoVRegs property (or set it without allocating the vregs)
359 // then report an error.
360 if (MF.getProperties().hasProperty(
361 MachineFunctionProperties::Property::NoVRegs) &&
362 MRI->getNumVirtRegs())
363 report("Function has NoVRegs property but there are VReg operands", &MF);
366 unsigned MachineVerifier::verify(const MachineFunction &MF) {
367 foundErrors = 0;
369 this->MF = &MF;
370 TM = &MF.getTarget();
371 TII = MF.getSubtarget().getInstrInfo();
372 TRI = MF.getSubtarget().getRegisterInfo();
373 MRI = &MF.getRegInfo();
375 const bool isFunctionFailedISel = MF.getProperties().hasProperty(
376 MachineFunctionProperties::Property::FailedISel);
378 // If we're mid-GlobalISel and we already triggered the fallback path then
379 // it's expected that the MIR is somewhat broken but that's ok since we'll
380 // reset it and clear the FailedISel attribute in ResetMachineFunctions.
381 if (isFunctionFailedISel)
382 return foundErrors;
384 isFunctionRegBankSelected = MF.getProperties().hasProperty(
385 MachineFunctionProperties::Property::RegBankSelected);
386 isFunctionSelected = MF.getProperties().hasProperty(
387 MachineFunctionProperties::Property::Selected);
388 isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(
389 MachineFunctionProperties::Property::TracksDebugUserValues);
391 LiveVars = nullptr;
392 LiveInts = nullptr;
393 LiveStks = nullptr;
394 Indexes = nullptr;
395 if (PASS) {
396 LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
397 // We don't want to verify LiveVariables if LiveIntervals is available.
398 if (!LiveInts)
399 LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
400 LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
401 Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
404 verifySlotIndexes();
406 verifyProperties(MF);
408 visitMachineFunctionBefore();
409 for (const MachineBasicBlock &MBB : MF) {
410 visitMachineBasicBlockBefore(&MBB);
411 // Keep track of the current bundle header.
412 const MachineInstr *CurBundle = nullptr;
413 // Do we expect the next instruction to be part of the same bundle?
414 bool InBundle = false;
416 for (const MachineInstr &MI : MBB.instrs()) {
417 if (MI.getParent() != &MBB) {
418 report("Bad instruction parent pointer", &MBB);
419 errs() << "Instruction: " << MI;
420 continue;
423 // Check for consistent bundle flags.
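// The flags come in mirrored pairs: the previous instruction's
// isBundledWithSucc() (tracked here as InBundle) must agree with this
// instruction's isBundledWithPred().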
424 if (InBundle && !MI.isBundledWithPred())
425 report("Missing BundledPred flag, "
426 "BundledSucc was set on predecessor",
427 &MI);
428 if (!InBundle && MI.isBundledWithPred())
429 report("BundledPred flag is set, "
430 "but BundledSucc not set on predecessor",
431 &MI);
433 // Is this a bundle header?
434 if (!MI.isInsideBundle()) {
435 if (CurBundle)
436 visitMachineBundleAfter(CurBundle);
437 CurBundle = &MI;
438 visitMachineBundleBefore(CurBundle);
439 } else if (!CurBundle)
440 report("No bundle header", &MI);
441 visitMachineInstrBefore(&MI);
442 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
443 const MachineOperand &Op = MI.getOperand(I);
444 if (Op.getParent() != &MI) {
445 // Make sure to use correct addOperand / RemoveOperand / ChangeTo
446 // functions when replacing operands of a MachineInstr.
447 report("Instruction has operand with wrong parent set", &MI);
450 visitMachineOperand(&Op, I);
453 // Was this the last bundled instruction?
454 InBundle = MI.isBundledWithSucc();
456 if (CurBundle)
457 visitMachineBundleAfter(CurBundle);
458 if (InBundle)
459 report("BundledSucc flag set on last instruction in block", &MBB.back());
460 visitMachineBasicBlockAfter(&MBB);
462 visitMachineFunctionAfter();
464 // Clean up.
465 regsLive.clear();
466 regsDefined.clear();
467 regsDead.clear();
468 regsKilled.clear();
469 regMasks.clear();
470 MBBInfoMap.clear();
472 return foundErrors;
475 void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
476 assert(MF);
477 errs() << '\n';
478 if (!foundErrors++) {
479 if (Banner)
480 errs() << "# " << Banner << '\n';
481 if (LiveInts != nullptr)
482 LiveInts->print(errs());
483 else
484 MF->print(errs(), Indexes);
486 errs() << "*** Bad machine code: " << msg << " ***\n"
487 << "- function: " << MF->getName() << "\n";
490 void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
491 assert(MBB);
492 report(msg, MBB->getParent());
493 errs() << "- basic block: " << printMBBReference(*MBB) << ' '
494 << MBB->getName() << " (" << (const void *)MBB << ')';
495 if (Indexes)
496 errs() << " [" << Indexes->getMBBStartIdx(MBB)
497 << ';' << Indexes->getMBBEndIdx(MBB) << ')';
498 errs() << '\n';
501 void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
502 assert(MI);
503 report(msg, MI->getParent());
504 errs() << "- instruction: ";
505 if (Indexes && Indexes->hasIndex(*MI))
506 errs() << Indexes->getInstructionIndex(*MI) << '\t';
507 MI->print(errs(), /*IsStandalone=*/true);
510 void MachineVerifier::report(const char *msg, const MachineOperand *MO,
511 unsigned MONum, LLT MOVRegType) {
512 assert(MO);
513 report(msg, MO->getParent());
514 errs() << "- operand " << MONum << ": ";
515 MO->print(errs(), MOVRegType, TRI);
516 errs() << "\n";
519 void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
520 report(Msg.str().c_str(), MI);
523 void MachineVerifier::report_context(SlotIndex Pos) const {
524 errs() << "- at: " << Pos << '\n';
527 void MachineVerifier::report_context(const LiveInterval &LI) const {
528 errs() << "- interval: " << LI << '\n';
531 void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
532 LaneBitmask LaneMask) const {
533 report_context_liverange(LR);
534 report_context_vreg_regunit(VRegUnit);
535 if (LaneMask.any())
536 report_context_lanemask(LaneMask);
539 void MachineVerifier::report_context(const LiveRange::Segment &S) const {
540 errs() << "- segment: " << S << '\n';
543 void MachineVerifier::report_context(const VNInfo &VNI) const {
544 errs() << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
547 void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
548 errs() << "- liverange: " << LR << '\n';
551 void MachineVerifier::report_context(MCPhysReg PReg) const {
552 errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
555 void MachineVerifier::report_context_vreg(Register VReg) const {
556 errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
559 void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
560 if (Register::isVirtualRegister(VRegOrUnit)) {
561 report_context_vreg(VRegOrUnit);
562 } else {
563 errs() << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n';
567 void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
568 errs() << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
571 void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
572 BBInfo &MInfo = MBBInfoMap[MBB];
573 if (!MInfo.reachable) {
574 MInfo.reachable = true;
575 for (const MachineBasicBlock *Succ : MBB->successors())
576 markReachable(Succ);
580 void MachineVerifier::visitMachineFunctionBefore() {
581 lastIndex = SlotIndex();
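// Prefer the frozen reserved-register set when it is available; otherwise
// recompute it from the target register info.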
582 regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
583 : TRI->getReservedRegs(*MF);
585 if (!MF->empty())
586 markReachable(&MF->front());
588 // Build a set of the basic blocks in the function.
589 FunctionBlocks.clear();
590 for (const auto &MBB : *MF) {
591 FunctionBlocks.insert(&MBB);
592 BBInfo &MInfo = MBBInfoMap[&MBB];
594 MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
595 if (MInfo.Preds.size() != MBB.pred_size())
596 report("MBB has duplicate entries in its predecessor list.", &MBB);
598 MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
599 if (MInfo.Succs.size() != MBB.succ_size())
600 report("MBB has duplicate entries in its successor list.", &MBB);
603 // Check that the register use lists are sane.
604 MRI->verifyUseLists();
606 if (!MF->empty())
607 verifyStackFrame();
610 void
611 MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
612 FirstTerminator = nullptr;
613 FirstNonPHI = nullptr;
615 if (!MF->getProperties().hasProperty(
616 MachineFunctionProperties::Property::NoPHIs) && MRI->tracksLiveness()) {
617 // If this block has allocatable physical registers live-in, check that
618 // it is an entry block or landing pad.
619 for (const auto &LI : MBB->liveins()) {
620 if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
621 MBB->getIterator() != MBB->getParent()->begin()) {
622 report("MBB has allocatable live-in, but isn't entry or landing-pad.", MBB);
623 report_context(LI.PhysReg);
628 // Count the number of landing pad successors.
629 SmallPtrSet<const MachineBasicBlock*, 4> LandingPadSuccs;
630 for (const auto *succ : MBB->successors()) {
631 if (succ->isEHPad())
632 LandingPadSuccs.insert(succ);
633 if (!FunctionBlocks.count(succ))
634 report("MBB has successor that isn't part of the function.", MBB);
635 if (!MBBInfoMap[succ].Preds.count(MBB)) {
636 report("Inconsistent CFG", MBB);
637 errs() << "MBB is not in the predecessor list of the successor "
638 << printMBBReference(*succ) << ".\n";
642 // Check the predecessor list.
643 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
644 if (!FunctionBlocks.count(Pred))
645 report("MBB has predecessor that isn't part of the function.", MBB);
646 if (!MBBInfoMap[Pred].Succs.count(MBB)) {
647 report("Inconsistent CFG", MBB);
648 errs() << "MBB is not in the successor list of the predecessor "
649 << printMBBReference(*Pred) << ".\n";
653 const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
654 const BasicBlock *BB = MBB->getBasicBlock();
655 const Function &F = MF->getFunction();
656 if (LandingPadSuccs.size() > 1 &&
657 !(AsmInfo &&
658 AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj &&
659 BB && isa<SwitchInst>(BB->getTerminator())) &&
660 !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
661 report("MBB has more than one landing pad successor", MBB);
663 // Call analyzeBranch. If it succeeds, there are several more conditions to check.
664 MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
665 SmallVector<MachineOperand, 4> Cond;
666 if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
667 Cond)) {
668 // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
669 // check whether its answers match up with reality.
670 if (!TBB && !FBB) {
671 // Block falls through to its successor.
672 if (!MBB->empty() && MBB->back().isBarrier() &&
673 !TII->isPredicated(MBB->back())) {
674 report("MBB exits via unconditional fall-through but ends with a "
675 "barrier instruction!", MBB);
677 if (!Cond.empty()) {
678 report("MBB exits via unconditional fall-through but has a condition!",
679 MBB);
681 } else if (TBB && !FBB && Cond.empty()) {
682 // Block unconditionally branches somewhere.
683 if (MBB->empty()) {
684 report("MBB exits via unconditional branch but doesn't contain "
685 "any instructions!", MBB);
686 } else if (!MBB->back().isBarrier()) {
687 report("MBB exits via unconditional branch but doesn't end with a "
688 "barrier instruction!", MBB);
689 } else if (!MBB->back().isTerminator()) {
690 report("MBB exits via unconditional branch but the branch isn't a "
691 "terminator instruction!", MBB);
693 } else if (TBB && !FBB && !Cond.empty()) {
694 // Block conditionally branches somewhere, otherwise falls through.
695 if (MBB->empty()) {
696 report("MBB exits via conditional branch/fall-through but doesn't "
697 "contain any instructions!", MBB);
698 } else if (MBB->back().isBarrier()) {
699 report("MBB exits via conditional branch/fall-through but ends with a "
700 "barrier instruction!", MBB);
701 } else if (!MBB->back().isTerminator()) {
702 report("MBB exits via conditional branch/fall-through but the branch "
703 "isn't a terminator instruction!", MBB);
705 } else if (TBB && FBB) {
706 // Block conditionally branches somewhere, otherwise branches
707 // somewhere else.
708 if (MBB->empty()) {
709 report("MBB exits via conditional branch/branch but doesn't "
710 "contain any instructions!", MBB);
711 } else if (!MBB->back().isBarrier()) {
712 report("MBB exits via conditional branch/branch but doesn't end with a "
713 "barrier instruction!", MBB);
714 } else if (!MBB->back().isTerminator()) {
715 report("MBB exits via conditional branch/branch but the branch "
716 "isn't a terminator instruction!", MBB);
718 if (Cond.empty()) {
719 report("MBB exits via conditional branch/branch but there's no "
720 "condition!", MBB);
722 } else {
723 report("analyzeBranch returned invalid data!", MBB);
726 // Now check that the successors match up with the answers reported by
727 // analyzeBranch.
728 if (TBB && !MBB->isSuccessor(TBB))
729 report("MBB exits via jump or conditional branch, but its target isn't a "
730 "CFG successor!",
731 MBB);
732 if (FBB && !MBB->isSuccessor(FBB))
733 report("MBB exits via conditional branch, but its target isn't a CFG "
734 "successor!",
735 MBB);
737 // There might be a fallthrough to the next block if there's either no
738 // unconditional true branch, or if there's a condition and one of the
739 // branches is missing.
740 bool Fallthrough = !TBB || (!Cond.empty() && !FBB);
742 // A conditional fallthrough must be an actual CFG successor, not
743 // unreachable. (Conversely, an unconditional fallthrough might not really
744 // be a successor, because the block might end in unreachable.)
745 if (!Cond.empty() && !FBB) {
746 MachineFunction::const_iterator MBBI = std::next(MBB->getIterator());
747 if (MBBI == MF->end()) {
748 report("MBB conditionally falls through out of function!", MBB);
749 } else if (!MBB->isSuccessor(&*MBBI))
750 report("MBB exits via conditional branch/fall-through but the CFG "
751 "successors don't match the actual successors!",
752 MBB);
755 // Verify that there aren't any extra un-accounted-for successors.
756 for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
757 // If this successor is one of the branch targets, it's okay.
758 if (SuccMBB == TBB || SuccMBB == FBB)
759 continue;
760 // If we might have a fallthrough, and the successor is the fallthrough
761 // block, that's also ok.
762 if (Fallthrough && SuccMBB == MBB->getNextNode())
763 continue;
764 // Also accept successors which are for exception-handling or might be
765 // inlineasm_br targets.
766 if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
767 continue;
768 report("MBB has unexpected successors which are not branch targets, "
769 "fallthrough, EHPads, or inlineasm_br targets.",
770 MBB);
774 regsLive.clear();
775 if (MRI->tracksLiveness()) {
776 for (const auto &LI : MBB->liveins()) {
777 if (!Register::isPhysicalRegister(LI.PhysReg)) {
778 report("MBB live-in list contains non-physical register", MBB);
779 continue;
781 for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
782 regsLive.insert(SubReg);
786 const MachineFrameInfo &MFI = MF->getFrameInfo();
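// Pristine registers (callee-saved registers the function leaves untouched)
// must keep their values, so treat them and their sub-registers as live too.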
787 BitVector PR = MFI.getPristineRegs(*MF);
788 for (unsigned I : PR.set_bits()) {
789 for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
790 regsLive.insert(SubReg);
793 regsKilled.clear();
794 regsDefined.clear();
796 if (Indexes)
797 lastIndex = Indexes->getMBBStartIdx(MBB);
800 // This function gets called for all bundle headers, including normal
801 // stand-alone unbundled instructions.
802 void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
803 if (Indexes && Indexes->hasIndex(*MI)) {
804 SlotIndex idx = Indexes->getInstructionIndex(*MI);
805 if (!(idx > lastIndex)) {
806 report("Instruction index out of order", MI);
807 errs() << "Last instruction was at " << lastIndex << '\n';
809 lastIndex = idx;
812 // Ensure non-terminators don't follow terminators.
813 if (MI->isTerminator()) {
814 if (!FirstTerminator)
815 FirstTerminator = MI;
816 } else if (FirstTerminator) {
817 report("Non-terminator instruction after the first terminator", MI);
818 errs() << "First terminator was:\t" << *FirstTerminator;
822 // The operands on an INLINEASM instruction must follow a template.
823 // Verify that the flag operands make sense.
824 void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
825 // The first two operands on INLINEASM are the asm string and global flags.
826 if (MI->getNumOperands() < 2) {
827 report("Too few operands on inline asm", MI);
828 return;
830 if (!MI->getOperand(0).isSymbol())
831 report("Asm string must be an external symbol", MI);
832 if (!MI->getOperand(1).isImm())
833 report("Asm flags must be an immediate", MI);
834 // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
835 // Extra_AsmDialect = 4, Extra_MayLoad = 8, and Extra_MayStore = 16,
836 // and Extra_IsConvergent = 32.
837 if (!isUInt<6>(MI->getOperand(1).getImm()))
838 report("Unknown asm flags", &MI->getOperand(1), 1);
840 static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");
842 unsigned OpNo = InlineAsm::MIOp_FirstOperand;
843 unsigned NumOps;
844 for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
845 const MachineOperand &MO = MI->getOperand(OpNo);
846 // There may be implicit ops after the fixed operands.
847 if (!MO.isImm())
848 break;
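// Each operand group starts with an immediate flag word that encodes how many
// register operands belong to the group, so advance OpNo past the whole group.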
849 NumOps = 1 + InlineAsm::getNumOperandRegisters(MO.getImm());
852 if (OpNo > MI->getNumOperands())
853 report("Missing operands in last group", MI);
855 // An optional MDNode follows the groups.
856 if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
857 ++OpNo;
859 // All trailing operands must be implicit registers.
860 for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
861 const MachineOperand &MO = MI->getOperand(OpNo);
862 if (!MO.isReg() || !MO.isImplicit())
863 report("Expected implicit register after groups", &MO, OpNo);
867 bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
868 const MachineRegisterInfo &MRI) {
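// Succeed (return true) only if no explicit virtual register operand has a
// non-scalar LLT; physical registers and non-register operands are ignored.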
869 if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
870 if (!Op.isReg())
871 return false;
872 const auto Reg = Op.getReg();
873 if (Reg.isPhysical())
874 return false;
875 return !MRI.getType(Reg).isScalar();
877 return true;
878 report("All register operands must have scalar types", &MI);
879 return false;
882 /// Check that types are consistent when two operands need to have the same
883 /// number of vector elements.
884 /// \return true if the types are valid.
885 bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
886 const MachineInstr *MI) {
887 if (Ty0.isVector() != Ty1.isVector()) {
888 report("operand types must be all-vector or all-scalar", MI);
889 // Generally we try to report as many issues as possible at once, but in
890 // this case it's not clear what we should be comparing the size of the
891 // scalar with: the size of the whole vector or its lane. Instead of
892 // making an arbitrary choice and emitting a not-so-helpful message, let's
893 // avoid the extra noise and stop here.
894 return false;
897 if (Ty0.isVector() && Ty0.getNumElements() != Ty1.getNumElements()) {
898 report("operand types must preserve number of vector elements", MI);
899 return false;
902 return true;
905 void MachineVerifier::verifyPreISelGenericInstruction(const MachineInstr *MI) {
906 if (isFunctionSelected)
907 report("Unexpected generic instruction in a Selected function", MI);
909 const MCInstrDesc &MCID = MI->getDesc();
910 unsigned NumOps = MI->getNumOperands();
912 // Branches must reference a basic block if they are not indirect
913 if (MI->isBranch() && !MI->isIndirectBranch()) {
914 bool HasMBB = false;
915 for (const MachineOperand &Op : MI->operands()) {
916 if (Op.isMBB()) {
917 HasMBB = true;
918 break;
922 if (!HasMBB) {
923 report("Branch instruction is missing a basic block operand or "
924 "isIndirectBranch property",
925 MI);
929 // Check types.
930 SmallVector<LLT, 4> Types;
931 for (unsigned I = 0, E = std::min(MCID.getNumOperands(), NumOps);
932 I != E; ++I) {
933 if (!MCID.OpInfo[I].isGenericType())
934 continue;
935 // Generic instructions specify type equality constraints between some of
936 // their operands. Make sure these are consistent.
937 size_t TypeIdx = MCID.OpInfo[I].getGenericTypeIndex();
938 Types.resize(std::max(TypeIdx + 1, Types.size()));
940 const MachineOperand *MO = &MI->getOperand(I);
941 if (!MO->isReg()) {
942 report("generic instruction must use register operands", MI);
943 continue;
946 LLT OpTy = MRI->getType(MO->getReg());
947 // Don't report a type mismatch if there is no actual mismatch, only a
948 // type missing, to reduce noise:
949 if (OpTy.isValid()) {
950 // Only the first valid type for a type index will be printed: don't
951 // overwrite it later so it's always clear which type was expected:
952 if (!Types[TypeIdx].isValid())
953 Types[TypeIdx] = OpTy;
954 else if (Types[TypeIdx] != OpTy)
955 report("Type mismatch in generic instruction", MO, I, OpTy);
956 } else {
957 // Generic instructions must have types attached to their operands.
958 report("Generic instruction is missing a virtual register type", MO, I);
962 // Generic opcodes must not have physical register operands.
963 for (unsigned I = 0; I < MI->getNumOperands(); ++I) {
964 const MachineOperand *MO = &MI->getOperand(I);
965 if (MO->isReg() && Register::isPhysicalRegister(MO->getReg()))
966 report("Generic instruction cannot have physical register", MO, I);
969 // Avoid out of bounds in checks below. This was already reported earlier.
970 if (MI->getNumOperands() < MCID.getNumOperands())
971 return;
973 StringRef ErrorInfo;
974 if (!TII->verifyInstruction(*MI, ErrorInfo))
975 report(ErrorInfo.data(), MI);
977 // Verify properties of various specific instruction types
978 unsigned Opc = MI->getOpcode();
979 switch (Opc) {
980 case TargetOpcode::G_ASSERT_SEXT:
981 case TargetOpcode::G_ASSERT_ZEXT: {
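// G_ASSERT_SEXT/G_ASSERT_ZEXT assert that operand 1 was already sign- or
// zero-extended from the bit width given by immediate operand 2.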
982 std::string OpcName =
983 Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
984 if (!MI->getOperand(2).isImm()) {
985 report(Twine(OpcName, " expects an immediate operand #2"), MI);
986 break;
989 Register Dst = MI->getOperand(0).getReg();
990 Register Src = MI->getOperand(1).getReg();
991 LLT SrcTy = MRI->getType(Src);
992 int64_t Imm = MI->getOperand(2).getImm();
993 if (Imm <= 0) {
994 report(Twine(OpcName, " size must be >= 1"), MI);
995 break;
998 if (Imm >= SrcTy.getScalarSizeInBits()) {
999 report(Twine(OpcName, " size must be less than source bit width"), MI);
1000 break;
1003 if (MRI->getRegBankOrNull(Src) != MRI->getRegBankOrNull(Dst)) {
1004 report(
1005 Twine(OpcName, " source and destination register banks must match"),
1006 MI);
1007 break;
1010 if (MRI->getRegClassOrNull(Src) != MRI->getRegClassOrNull(Dst))
1011 report(
1012 Twine(OpcName, " source and destination register classes must match"),
1013 MI);
1015 break;
1018 case TargetOpcode::G_CONSTANT:
1019 case TargetOpcode::G_FCONSTANT: {
1020 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1021 if (DstTy.isVector())
1022 report("Instruction cannot use a vector result type", MI);
1024 if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
1025 if (!MI->getOperand(1).isCImm()) {
1026 report("G_CONSTANT operand must be cimm", MI);
1027 break;
1030 const ConstantInt *CI = MI->getOperand(1).getCImm();
1031 if (CI->getBitWidth() != DstTy.getSizeInBits())
1032 report("inconsistent constant size", MI);
1033 } else {
1034 if (!MI->getOperand(1).isFPImm()) {
1035 report("G_FCONSTANT operand must be fpimm", MI);
1036 break;
1038 const ConstantFP *CF = MI->getOperand(1).getFPImm();
1040 if (APFloat::getSizeInBits(CF->getValueAPF().getSemantics()) !=
1041 DstTy.getSizeInBits()) {
1042 report("inconsistent constant size", MI);
1046 break;
1048 case TargetOpcode::G_LOAD:
1049 case TargetOpcode::G_STORE:
1050 case TargetOpcode::G_ZEXTLOAD:
1051 case TargetOpcode::G_SEXTLOAD: {
1052 LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
1053 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1054 if (!PtrTy.isPointer())
1055 report("Generic memory instruction must access a pointer", MI);
1057 // Generic loads and stores must have a single MachineMemOperand
1058 // describing that access.
1059 if (!MI->hasOneMemOperand()) {
1060 report("Generic instruction accessing memory must have one mem operand",
1061 MI);
1062 } else {
1063 const MachineMemOperand &MMO = **MI->memoperands_begin();
1064 if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
1065 MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
1066 if (MMO.getSizeInBits() >= ValTy.getSizeInBits())
1067 report("Generic extload must have a narrower memory type", MI);
1068 } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
1069 if (MMO.getSize() > ValTy.getSizeInBytes())
1070 report("load memory size cannot exceed result size", MI);
1071 } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
1072 if (ValTy.getSizeInBytes() < MMO.getSize())
1073 report("store memory size cannot exceed value size", MI);
1077 break;
1079 case TargetOpcode::G_PHI: {
1080 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1081 if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
1082 [this, &DstTy](const MachineOperand &MO) {
1083 if (!MO.isReg())
1084 return true;
1085 LLT Ty = MRI->getType(MO.getReg());
1086 if (!Ty.isValid() || (Ty != DstTy))
1087 return false;
1088 return true;
1090 report("Generic Instruction G_PHI has operands with incompatible/missing "
1091 "types",
1092 MI);
1093 break;
1095 case TargetOpcode::G_BITCAST: {
1096 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1097 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1098 if (!DstTy.isValid() || !SrcTy.isValid())
1099 break;
1101 if (SrcTy.isPointer() != DstTy.isPointer())
1102 report("bitcast cannot convert between pointers and other types", MI);
1104 if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
1105 report("bitcast sizes must match", MI);
1107 if (SrcTy == DstTy)
1108 report("bitcast must change the type", MI);
1110 break;
1112 case TargetOpcode::G_INTTOPTR:
1113 case TargetOpcode::G_PTRTOINT:
1114 case TargetOpcode::G_ADDRSPACE_CAST: {
1115 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1116 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1117 if (!DstTy.isValid() || !SrcTy.isValid())
1118 break;
1120 verifyVectorElementMatch(DstTy, SrcTy, MI);
1122 DstTy = DstTy.getScalarType();
1123 SrcTy = SrcTy.getScalarType();
1125 if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
1126 if (!DstTy.isPointer())
1127 report("inttoptr result type must be a pointer", MI);
1128 if (SrcTy.isPointer())
1129 report("inttoptr source type must not be a pointer", MI);
1130 } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
1131 if (!SrcTy.isPointer())
1132 report("ptrtoint source type must be a pointer", MI);
1133 if (DstTy.isPointer())
1134 report("ptrtoint result type must not be a pointer", MI);
1135 } else {
1136 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
1137 if (!SrcTy.isPointer() || !DstTy.isPointer())
1138 report("addrspacecast types must be pointers", MI);
1139 else {
1140 if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
1141 report("addrspacecast must convert different address spaces", MI);
1145 break;
1147 case TargetOpcode::G_PTR_ADD: {
1148 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1149 LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
1150 LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
1151 if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
1152 break;
1154 if (!PtrTy.getScalarType().isPointer())
1155 report("gep first operand must be a pointer", MI);
1157 if (OffsetTy.getScalarType().isPointer())
1158 report("gep offset operand must not be a pointer", MI);
1160 // TODO: Is the offset allowed to be a scalar with a vector?
1161 break;
1163 case TargetOpcode::G_PTRMASK: {
1164 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1165 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1166 LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
1167 if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
1168 break;
1170 if (!DstTy.getScalarType().isPointer())
1171 report("ptrmask result type must be a pointer", MI);
1173 if (!MaskTy.getScalarType().isScalar())
1174 report("ptrmask mask type must be an integer", MI);
1176 verifyVectorElementMatch(DstTy, MaskTy, MI);
1177 break;
1179 case TargetOpcode::G_SEXT:
1180 case TargetOpcode::G_ZEXT:
1181 case TargetOpcode::G_ANYEXT:
1182 case TargetOpcode::G_TRUNC:
1183 case TargetOpcode::G_FPEXT:
1184 case TargetOpcode::G_FPTRUNC: {
1185 // Number of operands and presence of types is already checked (and
1186 // reported in case of any issues), so no need to report them again. As
1187 // we're trying to report as many issues as possible at once, however, the
1188 // instructions aren't guaranteed to have the right number of operands or
1189 // types attached to them at this point.
1190 assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
1191 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1192 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1193 if (!DstTy.isValid() || !SrcTy.isValid())
1194 break;
1196 LLT DstElTy = DstTy.getScalarType();
1197 LLT SrcElTy = SrcTy.getScalarType();
1198 if (DstElTy.isPointer() || SrcElTy.isPointer())
1199 report("Generic extend/truncate can not operate on pointers", MI);
1201 verifyVectorElementMatch(DstTy, SrcTy, MI);
1203 unsigned DstSize = DstElTy.getSizeInBits();
1204 unsigned SrcSize = SrcElTy.getSizeInBits();
1205 switch (MI->getOpcode()) {
1206 default:
1207 if (DstSize <= SrcSize)
1208 report("Generic extend has destination type no larger than source", MI);
1209 break;
1210 case TargetOpcode::G_TRUNC:
1211 case TargetOpcode::G_FPTRUNC:
1212 if (DstSize >= SrcSize)
1213 report("Generic truncate has destination type no smaller than source",
1214 MI);
1215 break;
1217 break;
1219 case TargetOpcode::G_SELECT: {
1220 LLT SelTy = MRI->getType(MI->getOperand(0).getReg());
1221 LLT CondTy = MRI->getType(MI->getOperand(1).getReg());
1222 if (!SelTy.isValid() || !CondTy.isValid())
1223 break;
1225 // Scalar condition select on a vector is valid.
1226 if (CondTy.isVector())
1227 verifyVectorElementMatch(SelTy, CondTy, MI);
1228 break;
1230 case TargetOpcode::G_MERGE_VALUES: {
1231 // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
1232 // e.g. s2N = MERGE sN, sN
1233 // Merging multiple scalars into a vector is not allowed, should use
1234 // G_BUILD_VECTOR for that.
1235 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1236 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1237 if (DstTy.isVector() || SrcTy.isVector())
1238 report("G_MERGE_VALUES cannot operate on vectors", MI);
1240 const unsigned NumOps = MI->getNumOperands();
1241 if (DstTy.getSizeInBits() != SrcTy.getSizeInBits() * (NumOps - 1))
1242 report("G_MERGE_VALUES result size is inconsistent", MI);
1244 for (unsigned I = 2; I != NumOps; ++I) {
1245 if (MRI->getType(MI->getOperand(I).getReg()) != SrcTy)
1246 report("G_MERGE_VALUES source types do not match", MI);
1249 break;
1251 case TargetOpcode::G_UNMERGE_VALUES: {
1252 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1253 LLT SrcTy = MRI->getType(MI->getOperand(MI->getNumOperands()-1).getReg());
1254 // For now G_UNMERGE can split vectors.
1255 for (unsigned i = 0; i < MI->getNumOperands()-1; ++i) {
1256 if (MRI->getType(MI->getOperand(i).getReg()) != DstTy)
1257 report("G_UNMERGE_VALUES destination types do not match", MI);
1259 if (SrcTy.getSizeInBits() !=
1260 (DstTy.getSizeInBits() * (MI->getNumOperands() - 1))) {
1261 report("G_UNMERGE_VALUES source operand does not cover dest operands",
1262 MI);
1264 break;
1266 case TargetOpcode::G_BUILD_VECTOR: {
1267 // Source types must be scalars, dest type a vector. Total size of scalars
1268 // must match the dest vector size.
1269 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1270 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1271 if (!DstTy.isVector() || SrcEltTy.isVector()) {
1272 report("G_BUILD_VECTOR must produce a vector from scalar operands", MI);
1273 break;
1276 if (DstTy.getElementType() != SrcEltTy)
1277 report("G_BUILD_VECTOR result element type must match source type", MI);
1279 if (DstTy.getNumElements() != MI->getNumOperands() - 1)
1280 report("G_BUILD_VECTOR must have an operand for each elemement", MI);
1282 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1283 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1284 report("G_BUILD_VECTOR source operand types are not homogeneous", MI);
1286 break;
1288 case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
1289 // Source types must be scalars, dest type a vector. Scalar types must be
1290 // larger than the dest vector elt type, as this is a truncating operation.
1291 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1292 LLT SrcEltTy = MRI->getType(MI->getOperand(1).getReg());
1293 if (!DstTy.isVector() || SrcEltTy.isVector())
1294 report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
1295 MI);
1296 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1297 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1298 report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
1299 MI);
1300 if (SrcEltTy.getSizeInBits() <= DstTy.getElementType().getSizeInBits())
1301 report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
1302 "dest elt type",
1303 MI);
1304 break;
1306 case TargetOpcode::G_CONCAT_VECTORS: {
1307 // Source types should be vectors, and total size should match the dest
1308 // vector size.
1309 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1310 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1311 if (!DstTy.isVector() || !SrcTy.isVector())
1312 report("G_CONCAT_VECTOR requires vector source and destination operands",
1313 MI);
1315 if (MI->getNumOperands() < 3)
1316 report("G_CONCAT_VECTOR requires at least 2 source operands", MI);
1318 for (const MachineOperand &MO : llvm::drop_begin(MI->operands(), 2))
1319 if (MRI->getType(MI->getOperand(1).getReg()) != MRI->getType(MO.getReg()))
1320 report("G_CONCAT_VECTOR source operand types are not homogeneous", MI);
1321 if (DstTy.getNumElements() !=
1322 SrcTy.getNumElements() * (MI->getNumOperands() - 1))
1323 report("G_CONCAT_VECTOR num dest and source elements should match", MI);
1324 break;
1326 case TargetOpcode::G_ICMP:
1327 case TargetOpcode::G_FCMP: {
1328 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1329 LLT SrcTy = MRI->getType(MI->getOperand(2).getReg());
1331 if ((DstTy.isVector() != SrcTy.isVector()) ||
1332 (DstTy.isVector() && DstTy.getNumElements() != SrcTy.getNumElements()))
1333 report("Generic vector icmp/fcmp must preserve number of lanes", MI);
1335 break;
1337 case TargetOpcode::G_EXTRACT: {
1338 const MachineOperand &SrcOp = MI->getOperand(1);
1339 if (!SrcOp.isReg()) {
1340 report("extract source must be a register", MI);
1341 break;
1344 const MachineOperand &OffsetOp = MI->getOperand(2);
1345 if (!OffsetOp.isImm()) {
1346 report("extract offset must be a constant", MI);
1347 break;
1350 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1351 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1352 if (SrcSize == DstSize)
1353 report("extract source must be larger than result", MI);
1355 if (DstSize + OffsetOp.getImm() > SrcSize)
1356 report("extract reads past end of register", MI);
1357 break;
1359 case TargetOpcode::G_INSERT: {
1360 const MachineOperand &SrcOp = MI->getOperand(2);
1361 if (!SrcOp.isReg()) {
1362 report("insert source must be a register", MI);
1363 break;
1366 const MachineOperand &OffsetOp = MI->getOperand(3);
1367 if (!OffsetOp.isImm()) {
1368 report("insert offset must be a constant", MI);
1369 break;
1372 unsigned DstSize = MRI->getType(MI->getOperand(0).getReg()).getSizeInBits();
1373 unsigned SrcSize = MRI->getType(SrcOp.getReg()).getSizeInBits();
1375 if (DstSize <= SrcSize)
1376 report("inserted size must be smaller than total register", MI);
1378 if (SrcSize + OffsetOp.getImm() > DstSize)
1379 report("insert writes past end of register", MI);
1381 break;
1383 case TargetOpcode::G_JUMP_TABLE: {
1384 if (!MI->getOperand(1).isJTI())
1385 report("G_JUMP_TABLE source operand must be a jump table index", MI);
1386 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1387 if (!DstTy.isPointer())
1388 report("G_JUMP_TABLE dest operand must have a pointer type", MI);
1389 break;
1391 case TargetOpcode::G_BRJT: {
1392 if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
1393 report("G_BRJT src operand 0 must be a pointer type", MI);
1395 if (!MI->getOperand(1).isJTI())
1396 report("G_BRJT src operand 1 must be a jump table index", MI);
1398 const auto &IdxOp = MI->getOperand(2);
1399 if (!IdxOp.isReg() || MRI->getType(IdxOp.getReg()).isPointer())
1400 report("G_BRJT src operand 2 must be a scalar reg type", MI);
1401 break;
1403 case TargetOpcode::G_INTRINSIC:
1404 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS: {
1405 // TODO: Should verify number of def and use operands, but the current
1406 // interface requires passing in IR types for mangling.
1407 const MachineOperand &IntrIDOp = MI->getOperand(MI->getNumExplicitDefs());
1408 if (!IntrIDOp.isIntrinsicID()) {
1409 report("G_INTRINSIC first src operand must be an intrinsic ID", MI);
1410 break;
1413 bool NoSideEffects = MI->getOpcode() == TargetOpcode::G_INTRINSIC;
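// Plain G_INTRINSIC is reserved for readnone intrinsics; intrinsics that may
// access memory must use G_INTRINSIC_W_SIDE_EFFECTS instead.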
1414 unsigned IntrID = IntrIDOp.getIntrinsicID();
1415 if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
1416 AttributeList Attrs
1417 = Intrinsic::getAttributes(MF->getFunction().getContext(),
1418 static_cast<Intrinsic::ID>(IntrID));
1419 bool DeclHasSideEffects = !Attrs.hasFnAttr(Attribute::ReadNone);
1420 if (NoSideEffects && DeclHasSideEffects) {
1421 report("G_INTRINSIC used with intrinsic that accesses memory", MI);
1422 break;
1424 if (!NoSideEffects && !DeclHasSideEffects) {
1425 report("G_INTRINSIC_W_SIDE_EFFECTS used with readnone intrinsic", MI);
1426 break;
1430 break;
1432 case TargetOpcode::G_SEXT_INREG: {
1433 if (!MI->getOperand(2).isImm()) {
1434 report("G_SEXT_INREG expects an immediate operand #2", MI);
1435 break;
1438 LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
1439 int64_t Imm = MI->getOperand(2).getImm();
1440 if (Imm <= 0)
1441 report("G_SEXT_INREG size must be >= 1", MI);
1442 if (Imm >= SrcTy.getScalarSizeInBits())
1443 report("G_SEXT_INREG size must be less than source bit width", MI);
1444 break;
1446 case TargetOpcode::G_SHUFFLE_VECTOR: {
1447 const MachineOperand &MaskOp = MI->getOperand(3);
1448 if (!MaskOp.isShuffleMask()) {
1449 report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI);
1450 break;
1453 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1454 LLT Src0Ty = MRI->getType(MI->getOperand(1).getReg());
1455 LLT Src1Ty = MRI->getType(MI->getOperand(2).getReg());
1457 if (Src0Ty != Src1Ty)
1458 report("Source operands must be the same type", MI);
1460 if (Src0Ty.getScalarType() != DstTy.getScalarType())
1461 report("G_SHUFFLE_VECTOR cannot change element type", MI);
1463 // Don't check that all operands are vector because scalars are used in
1464 // place of 1 element vectors.
1465 int SrcNumElts = Src0Ty.isVector() ? Src0Ty.getNumElements() : 1;
1466 int DstNumElts = DstTy.isVector() ? DstTy.getNumElements() : 1;
1468 ArrayRef<int> MaskIdxes = MaskOp.getShuffleMask();
1470 if (static_cast<int>(MaskIdxes.size()) != DstNumElts)
1471 report("Wrong result type for shufflemask", MI);
1473 for (int Idx : MaskIdxes) {
1474 if (Idx < 0)
1475 continue;
1477 if (Idx >= 2 * SrcNumElts)
1478 report("Out of bounds shuffle index", MI);
1481 break;
1483 case TargetOpcode::G_DYN_STACKALLOC: {
1484 const MachineOperand &DstOp = MI->getOperand(0);
1485 const MachineOperand &AllocOp = MI->getOperand(1);
1486 const MachineOperand &AlignOp = MI->getOperand(2);
1488 if (!DstOp.isReg() || !MRI->getType(DstOp.getReg()).isPointer()) {
1489 report("dst operand 0 must be a pointer type", MI);
1490 break;
1493 if (!AllocOp.isReg() || !MRI->getType(AllocOp.getReg()).isScalar()) {
1494 report("src operand 1 must be a scalar reg type", MI);
1495 break;
1498 if (!AlignOp.isImm()) {
1499 report("src operand 2 must be an immediate type", MI);
1500 break;
1502 break;
1504 case TargetOpcode::G_MEMCPY_INLINE:
1505 case TargetOpcode::G_MEMCPY:
1506 case TargetOpcode::G_MEMMOVE: {
1507 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1508 if (MMOs.size() != 2) {
1509 report("memcpy/memmove must have 2 memory operands", MI);
1510 break;
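// The memory operand order is fixed: the first MMO describes the store to the
// destination, the second the load from the source.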
1513 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
1514 (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
1515 report("wrong memory operand types", MI);
1516 break;
1519 if (MMOs[0]->getSize() != MMOs[1]->getSize())
1520 report("inconsistent memory operand sizes", MI);
1522 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1523 LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());
1525 if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
1526 report("memory instruction operand must be a pointer", MI);
1527 break;
1530 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1531 report("inconsistent store address space", MI);
1532 if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
1533 report("inconsistent load address space", MI);
1535 if (Opc != TargetOpcode::G_MEMCPY_INLINE)
1536 if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
1537 report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);
1539 break;
1541 case TargetOpcode::G_BZERO:
1542 case TargetOpcode::G_MEMSET: {
1543 ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
1544 std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
1545 if (MMOs.size() != 1) {
1546 report(Twine(Name, " must have 1 memory operand"), MI);
1547 break;
1550 if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
1551 report(Twine(Name, " memory operand must be a store"), MI);
1552 break;
1555 LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
1556 if (!DstPtrTy.isPointer()) {
1557 report(Twine(Name, " operand must be a pointer"), MI);
1558 break;
1561 if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
1562 report("inconsistent " + Twine(Name, " address space"), MI);
1564 if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
1565 (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
1566 report("'tail' flag (last operand) must be an immediate 0 or 1", MI);
1568 break;
1570 case TargetOpcode::G_VECREDUCE_SEQ_FADD:
1571 case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
1572 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1573 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1574 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1575 if (!DstTy.isScalar())
1576 report("Vector reduction requires a scalar destination type", MI);
1577 if (!Src1Ty.isScalar())
1578 report("Sequential FADD/FMUL vector reduction requires a scalar 1st operand", MI);
1579 if (!Src2Ty.isVector())
1580 report("Sequential FADD/FMUL vector reduction must have a vector 2nd operand", MI);
1581 break;
1583 case TargetOpcode::G_VECREDUCE_FADD:
1584 case TargetOpcode::G_VECREDUCE_FMUL:
1585 case TargetOpcode::G_VECREDUCE_FMAX:
1586 case TargetOpcode::G_VECREDUCE_FMIN:
1587 case TargetOpcode::G_VECREDUCE_ADD:
1588 case TargetOpcode::G_VECREDUCE_MUL:
1589 case TargetOpcode::G_VECREDUCE_AND:
1590 case TargetOpcode::G_VECREDUCE_OR:
1591 case TargetOpcode::G_VECREDUCE_XOR:
1592 case TargetOpcode::G_VECREDUCE_SMAX:
1593 case TargetOpcode::G_VECREDUCE_SMIN:
1594 case TargetOpcode::G_VECREDUCE_UMAX:
1595 case TargetOpcode::G_VECREDUCE_UMIN: {
1596 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1597 if (!DstTy.isScalar())
1598 report("Vector reduction requires a scalar destination type", MI);
1599 break;
1602 case TargetOpcode::G_SBFX:
1603 case TargetOpcode::G_UBFX: {
1604 LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
1605 if (DstTy.isVector()) {
1606 report("Bitfield extraction is not supported on vectors", MI);
1607 break;
1609 break;
1611 case TargetOpcode::G_SHL:
1612 case TargetOpcode::G_LSHR:
1613 case TargetOpcode::G_ASHR:
1614 case TargetOpcode::G_ROTR:
1615 case TargetOpcode::G_ROTL: {
1616 LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
1617 LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
1618 if (Src1Ty.isVector() != Src2Ty.isVector()) {
1619 report("Shifts and rotates require operands to be either all scalars or "
1620 "all vectors",
1621 MI);
1622 break;
1624 break;
1626 case TargetOpcode::G_LLROUND:
1627 case TargetOpcode::G_LROUND: {
1628 verifyAllRegOpsScalar(*MI, *MRI);
1629 break;
1631 default:
1632 break;
1636 void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
1637 const MCInstrDesc &MCID = MI->getDesc();
1638 if (MI->getNumOperands() < MCID.getNumOperands()) {
1639 report("Too few operands", MI);
1640 errs() << MCID.getNumOperands() << " operands expected, but "
1641 << MI->getNumOperands() << " given.\n";
1644 if (MI->isPHI()) {
1645 if (MF->getProperties().hasProperty(
1646 MachineFunctionProperties::Property::NoPHIs))
1647 report("Found PHI instruction with NoPHIs property set", MI);
1649 if (FirstNonPHI)
1650 report("Found PHI instruction after non-PHI", MI);
1651 } else if (FirstNonPHI == nullptr)
1652 FirstNonPHI = MI;
1654 // Check the tied operands.
1655 if (MI->isInlineAsm())
1656 verifyInlineAsm(MI);
1658 // Check that unspillable terminators define a reg and have at most one use.
1659 if (TII->isUnspillableTerminator(MI)) {
1660 if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
1661 report("Unspillable Terminator does not define a reg", MI);
1662 Register Def = MI->getOperand(0).getReg();
1663 if (Def.isVirtual() &&
1664 !MF->getProperties().hasProperty(
1665 MachineFunctionProperties::Property::NoPHIs) &&
1666 std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
1667 report("Unspillable Terminator expected to have at most one use!", MI);
1670 // A fully-formed DBG_VALUE must have a location. Ignore partially formed
1671 // DBG_VALUEs: these are convenient to use in tests, but should never get
1672 // generated.
1673 if (MI->isDebugValue() && MI->getNumOperands() == 4)
1674 if (!MI->getDebugLoc())
1675 report("Missing DebugLoc for debug instruction", MI);
1677 // Meta instructions should never be the subject of debug value tracking,
1678 // since they don't create a value in the output program at all.
1679 if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
1680 report("Metadata instruction should not have a value tracking number", MI);
1682 // Check the MachineMemOperands for basic consistency.
1683 for (MachineMemOperand *Op : MI->memoperands()) {
1684 if (Op->isLoad() && !MI->mayLoad())
1685 report("Missing mayLoad flag", MI);
1686 if (Op->isStore() && !MI->mayStore())
1687 report("Missing mayStore flag", MI);
1690 // Debug values must not have a slot index.
1691 // Other instructions must have one, unless they are inside a bundle.
1692 if (LiveInts) {
1693 bool mapped = !LiveInts->isNotInMIMap(*MI);
1694 if (MI->isDebugOrPseudoInstr()) {
1695 if (mapped)
1696 report("Debug instruction has a slot index", MI);
1697 } else if (MI->isInsideBundle()) {
1698 if (mapped)
1699 report("Instruction inside bundle has a slot index", MI);
1700 } else {
1701 if (!mapped)
1702 report("Missing slot index", MI);
1706 unsigned Opc = MCID.getOpcode();
1707 if (isPreISelGenericOpcode(Opc) || isPreISelGenericOptimizationHint(Opc)) {
1708 verifyPreISelGenericInstruction(MI);
1709 return;
1712 StringRef ErrorInfo;
1713 if (!TII->verifyInstruction(*MI, ErrorInfo))
1714 report(ErrorInfo.data(), MI);
1716 // Verify properties of various specific instruction types
1717 switch (MI->getOpcode()) {
1718 case TargetOpcode::COPY: {
1719 const MachineOperand &DstOp = MI->getOperand(0);
1720 const MachineOperand &SrcOp = MI->getOperand(1);
1721 const Register SrcReg = SrcOp.getReg();
1722 const Register DstReg = DstOp.getReg();
1724 LLT DstTy = MRI->getType(DstReg);
1725 LLT SrcTy = MRI->getType(SrcReg);
1726 if (SrcTy.isValid() && DstTy.isValid()) {
1727 // If both types are valid, check that the types are the same.
1728 if (SrcTy != DstTy) {
1729 report("Copy Instruction is illegal with mismatching types", MI);
1730 errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n";
1733 break;
1736 if (!SrcTy.isValid() && !DstTy.isValid())
1737 break;
1739 // If we have only one valid type, this is likely a copy between a virtual
1740 // and physical register.
1741 unsigned SrcSize = 0;
1742 unsigned DstSize = 0;
1743 if (SrcReg.isPhysical() && DstTy.isValid()) {
1744 const TargetRegisterClass *SrcRC =
1745 TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
1746 if (SrcRC)
1747 SrcSize = TRI->getRegSizeInBits(*SrcRC);
1750 if (SrcSize == 0)
1751 SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);
1753 if (DstReg.isPhysical() && SrcTy.isValid()) {
1754 const TargetRegisterClass *DstRC =
1755 TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
1756 if (DstRC)
1757 DstSize = TRI->getRegSizeInBits(*DstRC);
1760 if (DstSize == 0)
1761 DstSize = TRI->getRegSizeInBits(DstReg, *MRI);
1763 if (SrcSize != 0 && DstSize != 0 && SrcSize != DstSize) {
1764 if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
1765 report("Copy Instruction is illegal with mismatching sizes", MI);
1766 errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize
1767 << "\n";
1770 break;
1772 case TargetOpcode::STATEPOINT: {
1773 StatepointOpers SO(MI);
1774 if (!MI->getOperand(SO.getIDPos()).isImm() ||
1775 !MI->getOperand(SO.getNBytesPos()).isImm() ||
1776 !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
1777 report("meta operands to STATEPOINT not constant!", MI);
1778 break;
1781 auto VerifyStackMapConstant = [&](unsigned Offset) {
1782 if (Offset >= MI->getNumOperands()) {
1783 report("stack map constant to STATEPOINT is out of range!", MI);
1784 return;
1786 if (!MI->getOperand(Offset - 1).isImm() ||
1787 MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
1788 !MI->getOperand(Offset).isImm())
1789 report("stack map constant to STATEPOINT not well formed!", MI);
1791 VerifyStackMapConstant(SO.getCCIdx());
1792 VerifyStackMapConstant(SO.getFlagsIdx());
1793 VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
1794 VerifyStackMapConstant(SO.getNumGCPtrIdx());
1795 VerifyStackMapConstant(SO.getNumAllocaIdx());
1796 VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());
1798 // Verify that all explicit statepoint defs are tied to gc operands, as
1799 // each def is expected to be a relocation of a gc operand.
1800 unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
1801 unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
1802 for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
1803 unsigned UseOpIdx;
1804 if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
1805 report("STATEPOINT defs expected to be tied", MI);
1806 break;
1808 if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
1809 report("STATEPOINT def tied to non-gc operand", MI);
1810 break;
1814 // TODO: verify we have properly encoded deopt arguments
1815 } break;
1816 case TargetOpcode::INSERT_SUBREG: {
1817 unsigned InsertedSize;
1818 if (unsigned SubIdx = MI->getOperand(2).getSubReg())
1819 InsertedSize = TRI->getSubRegIdxSize(SubIdx);
1820 else
1821 InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
1822 unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
1823 if (SubRegSize < InsertedSize) {
1824 report("INSERT_SUBREG expected inserted value to have equal or lesser "
1825 "size than the subreg it was inserted into", MI);
1826 break;
1828 } break;
1832 void
1833 MachineVerifier::visitMachineOperand(const MachineOperand *MO, unsigned MONum) {
1834 const MachineInstr *MI = MO->getParent();
1835 const MCInstrDesc &MCID = MI->getDesc();
1836 unsigned NumDefs = MCID.getNumDefs();
1837 if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
1838 NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;
1840 // The first MCID.NumDefs operands must be explicit register defines
1841 if (MONum < NumDefs) {
1842 const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
1843 if (!MO->isReg())
1844 report("Explicit definition must be a register", MO, MONum);
1845 else if (!MO->isDef() && !MCOI.isOptionalDef())
1846 report("Explicit definition marked as use", MO, MONum);
1847 else if (MO->isImplicit())
1848 report("Explicit definition marked as implicit", MO, MONum);
1849 } else if (MONum < MCID.getNumOperands()) {
1850 const MCOperandInfo &MCOI = MCID.OpInfo[MONum];
1851 // Don't check if it's the last operand in a variadic instruction. See,
1852 // e.g., LDM_RET in the arm back end. Check non-variadic operands only.
1853 bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
1854 if (!IsOptional) {
1855 if (MO->isReg()) {
1856 if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
1857 report("Explicit operand marked as def", MO, MONum);
1858 if (MO->isImplicit())
1859 report("Explicit operand marked as implicit", MO, MONum);
1862 // Check that an instruction has register operands only as expected.
1863 if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
1864 !MO->isReg() && !MO->isFI())
1865 report("Expected a register operand.", MO, MONum);
1866 if (MO->isReg()) {
1867 if (MCOI.OperandType == MCOI::OPERAND_IMMEDIATE ||
1868 (MCOI.OperandType == MCOI::OPERAND_PCREL &&
1869 !TII->isPCRelRegisterOperandLegal(*MO)))
1870 report("Expected a non-register operand.", MO, MONum);
1874 int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
1875 if (TiedTo != -1) {
1876 if (!MO->isReg())
1877 report("Tied use must be a register", MO, MONum);
1878 else if (!MO->isTied())
1879 report("Operand should be tied", MO, MONum);
1880 else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
1881 report("Tied def doesn't match MCInstrDesc", MO, MONum);
1882 else if (Register::isPhysicalRegister(MO->getReg())) {
1883 const MachineOperand &MOTied = MI->getOperand(TiedTo);
1884 if (!MOTied.isReg())
1885 report("Tied counterpart must be a register", &MOTied, TiedTo);
1886 else if (Register::isPhysicalRegister(MOTied.getReg()) &&
1887 MO->getReg() != MOTied.getReg())
1888 report("Tied physical registers must match.", &MOTied, TiedTo);
1890 } else if (MO->isReg() && MO->isTied())
1891 report("Explicit operand should not be tied", MO, MONum);
1892 } else {
1893 // ARM adds %reg0 operands to indicate predicates. We'll allow that.
1894 if (MO->isReg() && !MO->isImplicit() && !MI->isVariadic() && MO->getReg())
1895 report("Extra explicit operand on non-variadic instruction", MO, MONum);
1898 switch (MO->getType()) {
1899 case MachineOperand::MO_Register: {
1900 // Verify debug flag on debug instructions. Check this first because reg0
1901 // indicates an undefined debug value.
1902 if (MI->isDebugInstr() && MO->isUse()) {
1903 if (!MO->isDebug())
1904 report("Register operand must be marked debug", MO, MONum);
1905 } else if (MO->isDebug()) {
1906 report("Register operand must not be marked debug", MO, MONum);
1909 const Register Reg = MO->getReg();
1910 if (!Reg)
1911 return;
1912 if (MRI->tracksLiveness() && !MI->isDebugValue())
1913 checkLiveness(MO, MONum);
1915 // Verify the consistency of tied operands.
1916 if (MO->isTied()) {
1917 unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
1918 const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
1919 if (!OtherMO.isReg())
1920 report("Must be tied to a register", MO, MONum);
1921 if (!OtherMO.isTied())
1922 report("Missing tie flags on tied operand", MO, MONum);
1923 if (MI->findTiedOperandIdx(OtherIdx) != MONum)
1924 report("Inconsistent tie links", MO, MONum);
1925 if (MONum < MCID.getNumDefs()) {
1926 if (OtherIdx < MCID.getNumOperands()) {
1927 if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
1928 report("Explicit def tied to explicit use without tie constraint",
1929 MO, MONum);
1930 } else {
1931 if (!OtherMO.isImplicit())
1932 report("Explicit def should be tied to implicit use", MO, MONum);
1937 // Verify two-address constraints after the TwoAddressInstruction pass.
1938 // Both the TwoAddressInstruction pass and the PHI-node-elimination pass call
1939 // MRI->leaveSSA() to mark the function as NoSSA, but this verification must
1940 // run after TwoAddressInstruction, not after PHI-node elimination. So we
1941 // cannot key the check on NoSSA; instead we rely on the TiedOpsRewritten
1942 // property to decide when to verify two-address constraints, since that
1943 // property is set by the TwoAddressInstruction pass.
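// For illustration (editorial sketch, hypothetical MIR, not from this file):
// once TiedOpsRewritten is set, a two-address instruction such as
//   %2:gr32 = ADD32ri8 %2:gr32, 1, implicit-def dead $eflags
// is well formed because the tied use (operand 1) names the same register as
// the tied def (operand 0); a tied use naming any other register is reported
// just below.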
1944 unsigned DefIdx;
1945 if (MF->getProperties().hasProperty(
1946 MachineFunctionProperties::Property::TiedOpsRewritten) &&
1947 MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
1948 Reg != MI->getOperand(DefIdx).getReg())
1949 report("Two-address instruction operands must be identical", MO, MONum);
1951 // Check register classes.
1952 unsigned SubIdx = MO->getSubReg();
1954 if (Register::isPhysicalRegister(Reg)) {
1955 if (SubIdx) {
1956 report("Illegal subregister index for physical register", MO, MONum);
1957 return;
1959 if (MONum < MCID.getNumOperands()) {
1960 if (const TargetRegisterClass *DRC =
1961 TII->getRegClass(MCID, MONum, TRI, *MF)) {
1962 if (!DRC->contains(Reg)) {
1963 report("Illegal physical register for instruction", MO, MONum);
1964 errs() << printReg(Reg, TRI) << " is not a "
1965 << TRI->getRegClassName(DRC) << " register.\n";
1969 if (MO->isRenamable()) {
1970 if (MRI->isReserved(Reg)) {
1971 report("isRenamable set on reserved register", MO, MONum);
1972 return;
1975 } else {
1976 // Virtual register.
1977 const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
1978 if (!RC) {
1979 // This is a generic virtual register.
1981 // Do not allow undef uses for generic virtual registers. This ensures
1982 // getVRegDef can never fail and return null on a generic register.
1984 // FIXME: This restriction should probably be broadened to all SSA
1985 // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
1986 // run on the SSA function just before phi elimination.
1987 if (MO->isUndef())
1988 report("Generic virtual register use cannot be undef", MO, MONum);
1990 // Debug value instruction is permitted to use undefined vregs.
1991 // This is a performance measure to skip the overhead of immediately
1992 // pruning unused debug operands. The final undef substitution occurs
1993 // when debug values are allocated in LDVImpl::handleDebugValue, so
1994 // these verifications always apply after this pass.
1995 if (isFunctionTracksDebugUserValues || !MO->isUse() ||
1996 !MI->isDebugValue() || !MRI->def_empty(Reg)) {
1997 // If we're post-Select, we can't have gvregs anymore.
1998 if (isFunctionSelected) {
1999 report("Generic virtual register invalid in a Selected function",
2000 MO, MONum);
2001 return;
2004 // The gvreg must have a type and it must not have a SubIdx.
2005 LLT Ty = MRI->getType(Reg);
2006 if (!Ty.isValid()) {
2007 report("Generic virtual register must have a valid type", MO,
2008 MONum);
2009 return;
2012 const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
2014 // If we're post-RegBankSelect, the gvreg must have a bank.
2015 if (!RegBank && isFunctionRegBankSelected) {
2016 report("Generic virtual register must have a bank in a "
2017 "RegBankSelected function",
2018 MO, MONum);
2019 return;
2022 // Make sure the register fits into its register bank if any.
2023 if (RegBank && Ty.isValid() &&
2024 RegBank->getSize() < Ty.getSizeInBits()) {
2025 report("Register bank is too small for virtual register", MO,
2026 MONum);
2027 errs() << "Register bank " << RegBank->getName() << " too small("
2028 << RegBank->getSize() << ") to fit " << Ty.getSizeInBits()
2029 << "-bits\n";
2030 return;
2034 if (SubIdx) {
2035 report("Generic virtual register does not allow subregister index", MO,
2036 MONum);
2037 return;
2040 // If this is a target specific instruction and this operand
2041 // has register class constraint, the virtual register must
2042 // comply to it.
2043 if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
2044 MONum < MCID.getNumOperands() &&
2045 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2046 report("Virtual register does not match instruction constraint", MO,
2047 MONum);
2048 errs() << "Expect register class "
2049 << TRI->getRegClassName(
2050 TII->getRegClass(MCID, MONum, TRI, *MF))
2051 << " but got nothing\n";
2052 return;
2055 break;
2057 if (SubIdx) {
2058 const TargetRegisterClass *SRC =
2059 TRI->getSubClassWithSubReg(RC, SubIdx);
2060 if (!SRC) {
2061 report("Invalid subregister index for virtual register", MO, MONum);
2062 errs() << "Register class " << TRI->getRegClassName(RC)
2063 << " does not support subreg index " << SubIdx << "\n";
2064 return;
2066 if (RC != SRC) {
2067 report("Invalid register class for subregister index", MO, MONum);
2068 errs() << "Register class " << TRI->getRegClassName(RC)
2069 << " does not fully support subreg index " << SubIdx << "\n";
2070 return;
2073 if (MONum < MCID.getNumOperands()) {
2074 if (const TargetRegisterClass *DRC =
2075 TII->getRegClass(MCID, MONum, TRI, *MF)) {
2076 if (SubIdx) {
2077 const TargetRegisterClass *SuperRC =
2078 TRI->getLargestLegalSuperClass(RC, *MF);
2079 if (!SuperRC) {
2080 report("No largest legal super class exists.", MO, MONum);
2081 return;
2083 DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
2084 if (!DRC) {
2085 report("No matching super-reg register class.", MO, MONum);
2086 return;
2089 if (!RC->hasSuperClassEq(DRC)) {
2090 report("Illegal virtual register for instruction", MO, MONum);
2091 errs() << "Expected a " << TRI->getRegClassName(DRC)
2092 << " register, but got a " << TRI->getRegClassName(RC)
2093 << " register\n";
2098 break;
2101 case MachineOperand::MO_RegisterMask:
2102 regMasks.push_back(MO->getRegMask());
2103 break;
2105 case MachineOperand::MO_MachineBasicBlock:
2106 if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
2107 report("PHI operand is not in the CFG", MO, MONum);
2108 break;
2110 case MachineOperand::MO_FrameIndex:
2111 if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
2112 LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2113 int FI = MO->getIndex();
2114 LiveInterval &LI = LiveStks->getInterval(FI);
2115 SlotIndex Idx = LiveInts->getInstructionIndex(*MI);
2117 bool stores = MI->mayStore();
2118 bool loads = MI->mayLoad();
2119 // For a memory-to-memory move, we need to check if the frame
2120 // index is used for storing or loading, by inspecting the
2121 // memory operands.
2122 if (stores && loads) {
2123 for (auto *MMO : MI->memoperands()) {
2124 const PseudoSourceValue *PSV = MMO->getPseudoValue();
2125 if (PSV == nullptr) continue;
2126 const FixedStackPseudoSourceValue *Value =
2127 dyn_cast<FixedStackPseudoSourceValue>(PSV);
2128 if (Value == nullptr) continue;
2129 if (Value->getFrameIndex() != FI) continue;
2131 if (MMO->isStore())
2132 loads = false;
2133 else
2134 stores = false;
2135 break;
2137 if (loads == stores)
2138 report("Missing fixed stack memoperand.", MI);
2140 if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
2141 report("Instruction loads from dead spill slot", MO, MONum);
2142 errs() << "Live stack: " << LI << '\n';
2144 if (stores && !LI.liveAt(Idx.getRegSlot())) {
2145 report("Instruction stores to dead spill slot", MO, MONum);
2146 errs() << "Live stack: " << LI << '\n';
2149 break;
2151 default:
2152 break;
2156 void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
2157 unsigned MONum, SlotIndex UseIdx,
2158 const LiveRange &LR,
2159 Register VRegOrUnit,
2160 LaneBitmask LaneMask) {
2161 LiveQueryResult LRQ = LR.Query(UseIdx);
2162 // Check that we have a segment at the use. Note, however, that we only need
2163 // one live subregister range; the others may be dead.
2164 if (!LRQ.valueIn() && LaneMask.none()) {
2165 report("No live segment at use", MO, MONum);
2166 report_context_liverange(LR);
2167 report_context_vreg_regunit(VRegOrUnit);
2168 report_context(UseIdx);
2170 if (MO->isKill() && !LRQ.isKill()) {
2171 report("Live range continues after kill flag", MO, MONum);
2172 report_context_liverange(LR);
2173 report_context_vreg_regunit(VRegOrUnit);
2174 if (LaneMask.any())
2175 report_context_lanemask(LaneMask);
2176 report_context(UseIdx);
2180 void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
2181 unsigned MONum, SlotIndex DefIdx,
2182 const LiveRange &LR,
2183 Register VRegOrUnit,
2184 bool SubRangeCheck,
2185 LaneBitmask LaneMask) {
2186 if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
2187 assert(VNI && "NULL valno is not allowed");
2188 if (VNI->def != DefIdx) {
2189 report("Inconsistent valno->def", MO, MONum);
2190 report_context_liverange(LR);
2191 report_context_vreg_regunit(VRegOrUnit);
2192 if (LaneMask.any())
2193 report_context_lanemask(LaneMask);
2194 report_context(*VNI);
2195 report_context(DefIdx);
2197 } else {
2198 report("No live segment at def", MO, MONum);
2199 report_context_liverange(LR);
2200 report_context_vreg_regunit(VRegOrUnit);
2201 if (LaneMask.any())
2202 report_context_lanemask(LaneMask);
2203 report_context(DefIdx);
2205 // Check that, if the dead def flag is present, LiveInts agrees.
2206 if (MO->isDead()) {
2207 LiveQueryResult LRQ = LR.Query(DefIdx);
2208 if (!LRQ.isDeadDef()) {
2209 assert(Register::isVirtualRegister(VRegOrUnit) &&
2210 "Expecting a virtual register.");
2211 // A dead subreg def only tells us that the specific subreg is dead. There
2212 // could be other non-dead defs of other subregs, or we could have other
2213 // parts of the register being live through the instruction. So unless we
2214 // are checking liveness for a subrange it is ok for the live range to
2215 // continue, given that we have a dead def of a subregister.
2216 if (SubRangeCheck || MO->getSubReg() == 0) {
2217 report("Live range continues after dead def flag", MO, MONum);
2218 report_context_liverange(LR);
2219 report_context_vreg_regunit(VRegOrUnit);
2220 if (LaneMask.any())
2221 report_context_lanemask(LaneMask);
2227 void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
2228 const MachineInstr *MI = MO->getParent();
2229 const Register Reg = MO->getReg();
2230 const unsigned SubRegIdx = MO->getSubReg();
2232 const LiveInterval *LI = nullptr;
2233 if (LiveInts && Reg.isVirtual()) {
2234 if (LiveInts->hasInterval(Reg)) {
2235 LI = &LiveInts->getInterval(Reg);
2236 if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
2237 !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
2238 report("Live interval for subreg operand has no subranges", MO, MONum);
2239 } else {
2240 report("Virtual register has no live interval", MO, MONum);
2244 // Both use and def operands can read a register.
2245 if (MO->readsReg()) {
2246 if (MO->isKill())
2247 addRegWithSubRegs(regsKilled, Reg);
2249 // Check that LiveVars knows this kill (unless we are inside a bundle, in
2250 // which case we have already checked that LiveVars knows any kills on the
2251 // bundle header instead).
2252 if (LiveVars && Reg.isVirtual() && MO->isKill() &&
2253 !MI->isBundledWithPred()) {
2254 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2255 if (!is_contained(VI.Kills, MI))
2256 report("Kill missing from LiveVariables", MO, MONum);
2259 // Check LiveInts liveness and kill.
2260 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2261 SlotIndex UseIdx = LiveInts->getInstructionIndex(*MI);
2262 // Check the cached regunit intervals.
2263 if (Reg.isPhysical() && !isReserved(Reg)) {
2264 for (MCRegUnitIterator Units(Reg.asMCReg(), TRI); Units.isValid();
2265 ++Units) {
2266 if (MRI->isReservedRegUnit(*Units))
2267 continue;
2268 if (const LiveRange *LR = LiveInts->getCachedRegUnit(*Units))
2269 checkLivenessAtUse(MO, MONum, UseIdx, *LR, *Units);
2273 if (Reg.isVirtual()) {
2274 // This is a virtual register interval.
2275 checkLivenessAtUse(MO, MONum, UseIdx, *LI, Reg);
2277 if (LI->hasSubRanges() && !MO->isDef()) {
2278 LaneBitmask MOMask = SubRegIdx != 0
2279 ? TRI->getSubRegIndexLaneMask(SubRegIdx)
2280 : MRI->getMaxLaneMaskForVReg(Reg);
2281 LaneBitmask LiveInMask;
2282 for (const LiveInterval::SubRange &SR : LI->subranges()) {
2283 if ((MOMask & SR.LaneMask).none())
2284 continue;
2285 checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
2286 LiveQueryResult LRQ = SR.Query(UseIdx);
2287 if (LRQ.valueIn())
2288 LiveInMask |= SR.LaneMask;
2290 // At least part of the register has to be live at the use.
2291 if ((LiveInMask & MOMask).none()) {
2292 report("No live subrange at use", MO, MONum);
2293 report_context(*LI);
2294 report_context(UseIdx);
2300 // Use of a dead register.
2301 if (!regsLive.count(Reg)) {
2302 if (Reg.isPhysical()) {
2303 // Reserved registers may be used even when 'dead'.
2304 bool Bad = !isReserved(Reg);
2305 // We are fine if just any subregister has a defined value.
2306 if (Bad) {
2308 for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
2309 if (regsLive.count(SubReg)) {
2310 Bad = false;
2311 break;
2315 // If there is an additional implicit-use of a super register we stop
2316 // here. By definition we are fine if the super register is not
2317 // (completely) dead; if the complete super register is dead we will
2318 // get a report for its operand.
2319 if (Bad) {
2320 for (const MachineOperand &MOP : MI->uses()) {
2321 if (!MOP.isReg() || !MOP.isImplicit())
2322 continue;
2324 if (!MOP.getReg().isPhysical())
2325 continue;
2327 if (llvm::is_contained(TRI->subregs(MOP.getReg()), Reg))
2328 Bad = false;
2331 if (Bad)
2332 report("Using an undefined physical register", MO, MONum);
2333 } else if (MRI->def_empty(Reg)) {
2334 report("Reading virtual register without a def", MO, MONum);
2335 } else {
2336 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2337 // We don't know which virtual registers are live in, so only complain
2338 // if vreg was killed in this MBB. Otherwise keep track of vregs that
2339 // must be live in. PHI instructions are handled separately.
2340 if (MInfo.regsKilled.count(Reg))
2341 report("Using a killed virtual register", MO, MONum);
2342 else if (!MI->isPHI())
2343 MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
2348 if (MO->isDef()) {
2349 // Register defined.
2350 // TODO: verify that earlyclobber ops are not used.
2351 if (MO->isDead())
2352 addRegWithSubRegs(regsDead, Reg);
2353 else
2354 addRegWithSubRegs(regsDefined, Reg);
2356 // Verify SSA form.
2357 if (MRI->isSSA() && Reg.isVirtual() &&
2358 std::next(MRI->def_begin(Reg)) != MRI->def_end())
2359 report("Multiple virtual register defs in SSA form", MO, MONum);
2361 // Check LiveInts for a live segment, but only for virtual registers.
2362 if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
2363 SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
2364 DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());
2366 if (Reg.isVirtual()) {
2367 checkLivenessAtDef(MO, MONum, DefIdx, *LI, Reg);
2369 if (LI->hasSubRanges()) {
2370 LaneBitmask MOMask = SubRegIdx != 0
2371 ? TRI->getSubRegIndexLaneMask(SubRegIdx)
2372 : MRI->getMaxLaneMaskForVReg(Reg);
2373 for (const LiveInterval::SubRange &SR : LI->subranges()) {
2374 if ((SR.LaneMask & MOMask).none())
2375 continue;
2376 checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask);
2384 // This function gets called after visiting all instructions in a bundle. The
2385 // argument points to the bundle header.
2386 // Normal stand-alone instructions are also considered 'bundles', and this
2387 // function is called for all of them.
2388 void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
2389 BBInfo &MInfo = MBBInfoMap[MI->getParent()];
2390 set_union(MInfo.regsKilled, regsKilled);
2391 set_subtract(regsLive, regsKilled); regsKilled.clear();
2392 // Kill any masked registers.
2393 while (!regMasks.empty()) {
2394 const uint32_t *Mask = regMasks.pop_back_val();
2395 for (Register Reg : regsLive)
2396 if (Reg.isPhysical() &&
2397 MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
2398 regsDead.push_back(Reg);
2400 set_subtract(regsLive, regsDead); regsDead.clear();
2401 set_union(regsLive, regsDefined); regsDefined.clear();
2404 void
2405 MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
2406 MBBInfoMap[MBB].regsLiveOut = regsLive;
2407 regsLive.clear();
2409 if (Indexes) {
2410 SlotIndex stop = Indexes->getMBBEndIdx(MBB);
2411 if (!(stop > lastIndex)) {
2412 report("Block ends before last instruction index", MBB);
2413 errs() << "Block ends at " << stop
2414 << " last instruction was at " << lastIndex << '\n';
2416 lastIndex = stop;
2420 namespace {
2421 // This implements a set of registers that serves as a filter: it can filter
2422 // other sets by passing through elements not in the filter and blocking those
2423 // that are. Any filter implicitly includes the full set of physical registers
2424 // upon creation, thus filtering them all out. The filter itself, as a set,
2425 // only grows, and needs to be as efficient as possible.
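// Editorial usage sketch (not part of the verifier; all names below are
// hypothetical):
// \code
//   VRegFilter Seen;
//   Seen.add(Info.regsKilled);                     // seed the filter
//   SmallVector<Register, 0> NewVRegs;
//   if (Seen.filterAndAdd(Pred.regsLiveOut, NewVRegs)) {
//     // NewVRegs holds the virtual registers from Pred.regsLiveOut that were
//     // not already in the filter; they have now been added to it as well.
//   }
// \endcode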
2426 struct VRegFilter {
2427 // Add elements to the filter itself. \pre Input set \p FromRegSet must have
2428 // no duplicates. Both virtual and physical registers are fine.
2429 template <typename RegSetT> void add(const RegSetT &FromRegSet) {
2430 SmallVector<Register, 0> VRegsBuffer;
2431 filterAndAdd(FromRegSet, VRegsBuffer);
2433 // Filter \p FromRegSet through the filter and append passed elements into \p
2434 // ToVRegs. All elements appended are then added to the filter itself.
2435 // \returns true if anything changed.
2436 template <typename RegSetT>
2437 bool filterAndAdd(const RegSetT &FromRegSet,
2438 SmallVectorImpl<Register> &ToVRegs) {
2439 unsigned SparseUniverse = Sparse.size();
2440 unsigned NewSparseUniverse = SparseUniverse;
2441 unsigned NewDenseSize = Dense.size();
2442 size_t Begin = ToVRegs.size();
2443 for (Register Reg : FromRegSet) {
2444 if (!Reg.isVirtual())
2445 continue;
2446 unsigned Index = Register::virtReg2Index(Reg);
2447 if (Index < SparseUniverseMax) {
2448 if (Index < SparseUniverse && Sparse.test(Index))
2449 continue;
2450 NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
2451 } else {
2452 if (Dense.count(Reg))
2453 continue;
2454 ++NewDenseSize;
2456 ToVRegs.push_back(Reg);
2458 size_t End = ToVRegs.size();
2459 if (Begin == End)
2460 return false;
2461 // Reserving space in sets once performs better than doing so continuously
2462 // and pays easily for double look-ups (even in Dense with SparseUniverseMax
2463 // tuned all the way down) and double iteration (the second one is over a
2464 // SmallVector, which is a lot cheaper compared to DenseSet or BitVector).
2465 Sparse.resize(NewSparseUniverse);
2466 Dense.reserve(NewDenseSize);
2467 for (unsigned I = Begin; I < End; ++I) {
2468 Register Reg = ToVRegs[I];
2469 unsigned Index = Register::virtReg2Index(Reg);
2470 if (Index < SparseUniverseMax)
2471 Sparse.set(Index);
2472 else
2473 Dense.insert(Reg);
2475 return true;
2478 private:
2479 static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
2480 // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyond
2481 // are tracked by Dense. The only purpose of the threshold and the Dense set
2482 // is to have a reasonably growing memory usage in pathological cases (large
2483 // number of very sparse VRegFilter instances live at the same time). In
2484 // practice even in the worst-by-execution-time cases having all elements
2485 // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
2486 // space efficient than if tracked by Dense. The threshold is set to keep the
2487 // worst-case memory usage within 2x of figures determined empirically for
2488 // the "all Dense" scenario in such worst-by-execution-time cases.
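// For instance (editorial example): with SparseUniverseMax = 10 * 1024 * 8 =
// 81920, a virtual register with index 100 is tracked in the Sparse BitVector,
// while one with index 200000 falls through to the Dense set.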
2489 BitVector Sparse;
2490 DenseSet<unsigned> Dense;
2493 // Implements both a transfer function and a (binary, in-place) join operator
2494 // for a dataflow over register sets with set union join and filtering transfer
2495 // (out_b = in_b \ filter_b). filter_b is expected to be set up ahead of time.
2496 // Maintains out_b as its state, allowing for O(n) iteration over it at any
2497 // time, where n is the size of the set (as opposed to O(U) where U is the
2498 // universe). filter_b implicitly contains all physical registers at all times.
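// Editorial usage sketch (not part of the verifier; the names mirror
// calcRegsPassed further below):
// \code
//   FilteringVRegSet VRegs;
//   VRegs.addToFilter(Info.regsKilled);            // set up filter_b
//   for (const MachineBasicBlock *Pred : MB->predecessors())
//     VRegs.add(MBBInfoMap[Pred].regsLiveOut);     // transfer + join
//   Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
// \endcode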
2499 class FilteringVRegSet {
2500 VRegFilter Filter;
2501 SmallVector<Register, 0> VRegs;
2503 public:
2504 // Set up the filter_b. \pre Input register set \p RS must have no duplicates.
2505 // Both virtual and physical registers are fine.
2506 template <typename RegSetT> void addToFilter(const RegSetT &RS) {
2507 Filter.add(RS);
2509 // Passes \p RS through the filter_b (transfer function) and adds what's left
2510 // to itself (out_b).
2511 template <typename RegSetT> bool add(const RegSetT &RS) {
2512 // Double-duty the Filter: to maintain VRegs a set (and the join operation
2513 // a set union) just add everything being added here to the Filter as well.
2514 return Filter.filterAndAdd(RS, VRegs);
2516 using const_iterator = decltype(VRegs)::const_iterator;
2517 const_iterator begin() const { return VRegs.begin(); }
2518 const_iterator end() const { return VRegs.end(); }
2519 size_t size() const { return VRegs.size(); }
2521 } // namespace
2523 // Calculate the largest possible vregsPassed sets. These are the registers that
2524 // can pass through an MBB live, but may not be live every time. It is assumed
2525 // that all vregsPassed sets are empty before the call.
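// In dataflow terms (editorial note), a single reverse-post-order sweep computes
//   vregsPassed[B] = ( union over reachable preds P of B :
//                        regsLiveOut[P] U vregsPassed[P] )
//                    \ ( regsKilled[B] U regsLiveOut[B] U all physical regs )
// where the subtracted set is the VRegFilter seeded below.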
2526 void MachineVerifier::calcRegsPassed() {
2527 if (MF->empty())
2528 // ReversePostOrderTraversal doesn't handle empty functions.
2529 return;
2531 for (const MachineBasicBlock *MB :
2532 ReversePostOrderTraversal<const MachineFunction *>(MF)) {
2533 FilteringVRegSet VRegs;
2534 BBInfo &Info = MBBInfoMap[MB];
2535 assert(Info.reachable);
2537 VRegs.addToFilter(Info.regsKilled);
2538 VRegs.addToFilter(Info.regsLiveOut);
2539 for (const MachineBasicBlock *Pred : MB->predecessors()) {
2540 const BBInfo &PredInfo = MBBInfoMap[Pred];
2541 if (!PredInfo.reachable)
2542 continue;
2544 VRegs.add(PredInfo.regsLiveOut);
2545 VRegs.add(PredInfo.vregsPassed);
2547 Info.vregsPassed.reserve(VRegs.size());
2548 Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
2552 // Calculate the set of virtual registers that must be passed through each basic
2553 // block in order to satisfy the requirements of successor blocks. This is very
2554 // similar to calcRegsPassed, only backwards.
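// In dataflow terms (editorial note): each block pushes its vregsLiveIn into
// every predecessor's vregsRequired, each PHI pushes the register it reads
// along a given edge into that edge's predecessor, and vregsRequired is then
// propagated backwards via a worklist until no set changes; every push goes
// through BBInfo::addRequired.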
2555 void MachineVerifier::calcRegsRequired() {
2556 // First push live-in regs to predecessors' vregsRequired.
2557 SmallPtrSet<const MachineBasicBlock*, 8> todo;
2558 for (const auto &MBB : *MF) {
2559 BBInfo &MInfo = MBBInfoMap[&MBB];
2560 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
2561 BBInfo &PInfo = MBBInfoMap[Pred];
2562 if (PInfo.addRequired(MInfo.vregsLiveIn))
2563 todo.insert(Pred);
2566 // Handle the PHI node.
2567 for (const MachineInstr &MI : MBB.phis()) {
2568 for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
2569 // Skip operands that are not regs, or regs that are not read (e.g. undef).
2570 if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
2571 continue;
2573 // Get register and predecessor for one PHI edge.
2574 Register Reg = MI.getOperand(i).getReg();
2575 const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();
2577 BBInfo &PInfo = MBBInfoMap[Pred];
2578 if (PInfo.addRequired(Reg))
2579 todo.insert(Pred);
2584 // Iteratively push vregsRequired to predecessors. This will converge to the
2585 // same final state regardless of DenseSet iteration order.
2586 while (!todo.empty()) {
2587 const MachineBasicBlock *MBB = *todo.begin();
2588 todo.erase(MBB);
2589 BBInfo &MInfo = MBBInfoMap[MBB];
2590 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
2591 if (Pred == MBB)
2592 continue;
2593 BBInfo &SInfo = MBBInfoMap[Pred];
2594 if (SInfo.addRequired(MInfo.vregsRequired))
2595 todo.insert(Pred);
2600 // Check PHI instructions at the beginning of MBB. It is assumed that
2601 // calcRegsPassed has been run so BBInfo::isLiveOut is valid.
2602 void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
2603 BBInfo &MInfo = MBBInfoMap[&MBB];
2605 SmallPtrSet<const MachineBasicBlock*, 8> seen;
2606 for (const MachineInstr &Phi : MBB) {
2607 if (!Phi.isPHI())
2608 break;
2609 seen.clear();
2611 const MachineOperand &MODef = Phi.getOperand(0);
2612 if (!MODef.isReg() || !MODef.isDef()) {
2613 report("Expected first PHI operand to be a register def", &MODef, 0);
2614 continue;
2616 if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
2617 MODef.isEarlyClobber() || MODef.isDebug())
2618 report("Unexpected flag on PHI operand", &MODef, 0);
2619 Register DefReg = MODef.getReg();
2620 if (!Register::isVirtualRegister(DefReg))
2621 report("Expected first PHI operand to be a virtual register", &MODef, 0);
2623 for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
2624 const MachineOperand &MO0 = Phi.getOperand(I);
2625 if (!MO0.isReg()) {
2626 report("Expected PHI operand to be a register", &MO0, I);
2627 continue;
2629 if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
2630 MO0.isDebug() || MO0.isTied())
2631 report("Unexpected flag on PHI operand", &MO0, I);
2633 const MachineOperand &MO1 = Phi.getOperand(I + 1);
2634 if (!MO1.isMBB()) {
2635 report("Expected PHI operand to be a basic block", &MO1, I + 1);
2636 continue;
2639 const MachineBasicBlock &Pre = *MO1.getMBB();
2640 if (!Pre.isSuccessor(&MBB)) {
2641 report("PHI input is not a predecessor block", &MO1, I + 1);
2642 continue;
2645 if (MInfo.reachable) {
2646 seen.insert(&Pre);
2647 BBInfo &PrInfo = MBBInfoMap[&Pre];
2648 if (!MO0.isUndef() && PrInfo.reachable &&
2649 !PrInfo.isLiveOut(MO0.getReg()))
2650 report("PHI operand is not live-out from predecessor", &MO0, I);
2654 // Did we see all predecessors?
2655 if (MInfo.reachable) {
2656 for (MachineBasicBlock *Pred : MBB.predecessors()) {
2657 if (!seen.count(Pred)) {
2658 report("Missing PHI operand", &Phi);
2659 errs() << printMBBReference(*Pred)
2660 << " is a predecessor according to the CFG.\n";
2667 void MachineVerifier::visitMachineFunctionAfter() {
2668 calcRegsPassed();
2670 for (const MachineBasicBlock &MBB : *MF)
2671 checkPHIOps(MBB);
2673 // Now check liveness info if available
2674 calcRegsRequired();
2676 // Check for killed virtual registers that should be live out.
2677 for (const auto &MBB : *MF) {
2678 BBInfo &MInfo = MBBInfoMap[&MBB];
2679 for (Register VReg : MInfo.vregsRequired)
2680 if (MInfo.regsKilled.count(VReg)) {
2681 report("Virtual register killed in block, but needed live out.", &MBB);
2682 errs() << "Virtual register " << printReg(VReg)
2683 << " is used after the block.\n";
2687 if (!MF->empty()) {
2688 BBInfo &MInfo = MBBInfoMap[&MF->front()];
2689 for (Register VReg : MInfo.vregsRequired) {
2690 report("Virtual register defs don't dominate all uses.", MF);
2691 report_context_vreg(VReg);
2695 if (LiveVars)
2696 verifyLiveVariables();
2697 if (LiveInts)
2698 verifyLiveIntervals();
2700 // Check live-in list of each MBB. If a register is live into MBB, check
2701 // that the register is in regsLiveOut of each predecessor block. Since
2702 // this must come from a definition in the predecessor or its live-in
2703 // list, this will catch a live-through case where the predecessor does not
2704 // have the register in its live-in list. This currently only checks
2705 // registers that have no aliases, are not allocatable and are not
2706 // reserved, which could mean a condition code register for instance.
2707 if (MRI->tracksLiveness())
2708 for (const auto &MBB : *MF)
2709 for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) {
2710 MCPhysReg LiveInReg = P.PhysReg;
2711 bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
2712 if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
2713 continue;
2714 for (const MachineBasicBlock *Pred : MBB.predecessors()) {
2715 BBInfo &PInfo = MBBInfoMap[Pred];
2716 if (!PInfo.regsLiveOut.count(LiveInReg)) {
2717 report("Live in register not found to be live out from predecessor.",
2718 &MBB);
2719 errs() << TRI->getName(LiveInReg)
2720 << " not found to be live out from "
2721 << printMBBReference(*Pred) << "\n";
2726 for (auto CSInfo : MF->getCallSitesInfo())
2727 if (!CSInfo.first->isCall())
2728 report("Call site info referencing instruction that is not call", MF);
2730 // If there's debug-info, check that we don't have any duplicate value
2731 // tracking numbers.
2732 if (MF->getFunction().getSubprogram()) {
2733 DenseSet<unsigned> SeenNumbers;
2734 for (auto &MBB : *MF) {
2735 for (auto &MI : MBB) {
2736 if (auto Num = MI.peekDebugInstrNum()) {
2737 auto Result = SeenNumbers.insert((unsigned)Num);
2738 if (!Result.second)
2739 report("Instruction has a duplicated value tracking number", &MI);
2746 void MachineVerifier::verifyLiveVariables() {
2747 assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
2748 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
2749 Register Reg = Register::index2VirtReg(I);
2750 LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
2751 for (const auto &MBB : *MF) {
2752 BBInfo &MInfo = MBBInfoMap[&MBB];
2754 // Our vregsRequired should be identical to LiveVariables' AliveBlocks
2755 if (MInfo.vregsRequired.count(Reg)) {
2756 if (!VI.AliveBlocks.test(MBB.getNumber())) {
2757 report("LiveVariables: Block missing from AliveBlocks", &MBB);
2758 errs() << "Virtual register " << printReg(Reg)
2759 << " must be live through the block.\n";
2761 } else {
2762 if (VI.AliveBlocks.test(MBB.getNumber())) {
2763 report("LiveVariables: Block should not be in AliveBlocks", &MBB);
2764 errs() << "Virtual register " << printReg(Reg)
2765 << " is not needed live through the block.\n";
2772 void MachineVerifier::verifyLiveIntervals() {
2773 assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
2774 for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
2775 Register Reg = Register::index2VirtReg(I);
2777 // Spilling and splitting may leave unused registers around. Skip them.
2778 if (MRI->reg_nodbg_empty(Reg))
2779 continue;
2781 if (!LiveInts->hasInterval(Reg)) {
2782 report("Missing live interval for virtual register", MF);
2783 errs() << printReg(Reg, TRI) << " still has defs or uses\n";
2784 continue;
2787 const LiveInterval &LI = LiveInts->getInterval(Reg);
2788 assert(Reg == LI.reg() && "Invalid reg to interval mapping");
2789 verifyLiveInterval(LI);
2792 // Verify all the cached regunit intervals.
2793 for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
2794 if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
2795 verifyLiveRange(*LR, i);
2798 void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
2799 const VNInfo *VNI, Register Reg,
2800 LaneBitmask LaneMask) {
2801 if (VNI->isUnused())
2802 return;
2804 const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);
2806 if (!DefVNI) {
2807 report("Value not live at VNInfo def and not marked unused", MF);
2808 report_context(LR, Reg, LaneMask);
2809 report_context(*VNI);
2810 return;
2813 if (DefVNI != VNI) {
2814 report("Live segment at def has different VNInfo", MF);
2815 report_context(LR, Reg, LaneMask);
2816 report_context(*VNI);
2817 return;
2820 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
2821 if (!MBB) {
2822 report("Invalid VNInfo definition index", MF);
2823 report_context(LR, Reg, LaneMask);
2824 report_context(*VNI);
2825 return;
2828 if (VNI->isPHIDef()) {
2829 if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
2830 report("PHIDef VNInfo is not defined at MBB start", MBB);
2831 report_context(LR, Reg, LaneMask);
2832 report_context(*VNI);
2834 return;
2837 // Non-PHI def.
2838 const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
2839 if (!MI) {
2840 report("No instruction at VNInfo def index", MBB);
2841 report_context(LR, Reg, LaneMask);
2842 report_context(*VNI);
2843 return;
2846 if (Reg != 0) {
2847 bool hasDef = false;
2848 bool isEarlyClobber = false;
2849 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
2850 if (!MOI->isReg() || !MOI->isDef())
2851 continue;
2852 if (Register::isVirtualRegister(Reg)) {
2853 if (MOI->getReg() != Reg)
2854 continue;
2855 } else {
2856 if (!Register::isPhysicalRegister(MOI->getReg()) ||
2857 !TRI->hasRegUnit(MOI->getReg(), Reg))
2858 continue;
2860 if (LaneMask.any() &&
2861 (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
2862 continue;
2863 hasDef = true;
2864 if (MOI->isEarlyClobber())
2865 isEarlyClobber = true;
2868 if (!hasDef) {
2869 report("Defining instruction does not modify register", MI);
2870 report_context(LR, Reg, LaneMask);
2871 report_context(*VNI);
2874 // Early clobber defs begin at USE slots, but other defs must begin at
2875 // DEF slots.
2876 if (isEarlyClobber) {
2877 if (!VNI->def.isEarlyClobber()) {
2878 report("Early clobber def must be at an early-clobber slot", MBB);
2879 report_context(LR, Reg, LaneMask);
2880 report_context(*VNI);
2882 } else if (!VNI->def.isRegister()) {
2883 report("Non-PHI, non-early clobber def must be at a register slot", MBB);
2884 report_context(LR, Reg, LaneMask);
2885 report_context(*VNI);
2890 void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
2891 const LiveRange::const_iterator I,
2892 Register Reg,
2893 LaneBitmask LaneMask) {
2894 const LiveRange::Segment &S = *I;
2895 const VNInfo *VNI = S.valno;
2896 assert(VNI && "Live segment has no valno");
2898 if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
2899 report("Foreign valno in live segment", MF);
2900 report_context(LR, Reg, LaneMask);
2901 report_context(S);
2902 report_context(*VNI);
2905 if (VNI->isUnused()) {
2906 report("Live segment valno is marked unused", MF);
2907 report_context(LR, Reg, LaneMask);
2908 report_context(S);
2911 const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
2912 if (!MBB) {
2913 report("Bad start of live segment, no basic block", MF);
2914 report_context(LR, Reg, LaneMask);
2915 report_context(S);
2916 return;
2918 SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
2919 if (S.start != MBBStartIdx && S.start != VNI->def) {
2920 report("Live segment must begin at MBB entry or valno def", MBB);
2921 report_context(LR, Reg, LaneMask);
2922 report_context(S);
2925 const MachineBasicBlock *EndMBB =
2926 LiveInts->getMBBFromIndex(S.end.getPrevSlot());
2927 if (!EndMBB) {
2928 report("Bad end of live segment, no basic block", MF);
2929 report_context(LR, Reg, LaneMask);
2930 report_context(S);
2931 return;
2934 // No more checks for live-out segments.
2935 if (S.end == LiveInts->getMBBEndIdx(EndMBB))
2936 return;
2938 // RegUnit intervals are allowed to contain dead PHI defs.
2939 if (!Register::isVirtualRegister(Reg) && VNI->isPHIDef() &&
2940 S.start == VNI->def && S.end == VNI->def.getDeadSlot())
2941 return;
2943 // The live segment is ending inside EndMBB
2944 const MachineInstr *MI =
2945 LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
2946 if (!MI) {
2947 report("Live segment doesn't end at a valid instruction", EndMBB);
2948 report_context(LR, Reg, LaneMask);
2949 report_context(S);
2950 return;
2953 // The block slot must refer to a basic block boundary.
2954 if (S.end.isBlock()) {
2955 report("Live segment ends at B slot of an instruction", EndMBB);
2956 report_context(LR, Reg, LaneMask);
2957 report_context(S);
2960 if (S.end.isDead()) {
2961 // Segment ends on the dead slot.
2962 // That means there must be a dead def.
2963 if (!SlotIndex::isSameInstr(S.start, S.end)) {
2964 report("Live segment ending at dead slot spans instructions", EndMBB);
2965 report_context(LR, Reg, LaneMask);
2966 report_context(S);
2970 // After tied operands are rewritten, a live segment can only end at an
2971 // early-clobber slot if it is being redefined by an early-clobber def.
2972 // TODO: Before tied operands are rewritten, a live segment can only end at an
2973 // early-clobber slot if the last use is tied to an early-clobber def.
2974 if (MF->getProperties().hasProperty(
2975 MachineFunctionProperties::Property::TiedOpsRewritten) &&
2976 S.end.isEarlyClobber()) {
2977 if (I+1 == LR.end() || (I+1)->start != S.end) {
2978 report("Live segment ending at early clobber slot must be "
2979 "redefined by an EC def in the same instruction", EndMBB);
2980 report_context(LR, Reg, LaneMask);
2981 report_context(S);
2985 // The following checks only apply to virtual registers. Physreg liveness
2986 // is too weird to check.
2987 if (Register::isVirtualRegister(Reg)) {
2988 // A live segment can end with either a redefinition, a kill flag on a
2989 // use, or a dead flag on a def.
2990 bool hasRead = false;
2991 bool hasSubRegDef = false;
2992 bool hasDeadDef = false;
2993 for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
2994 if (!MOI->isReg() || MOI->getReg() != Reg)
2995 continue;
2996 unsigned Sub = MOI->getSubReg();
2997 LaneBitmask SLM = Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub)
2998 : LaneBitmask::getAll();
2999 if (MOI->isDef()) {
3000 if (Sub != 0) {
3001 hasSubRegDef = true;
3002 // An operand %0:sub0 reads %0:sub1..n. Invert the lane
3003 // mask for subregister defs. Read-undef defs will be handled by
3004 // readsReg below.
3005 SLM = ~SLM;
3007 if (MOI->isDead())
3008 hasDeadDef = true;
3010 if (LaneMask.any() && (LaneMask & SLM).none())
3011 continue;
3012 if (MOI->readsReg())
3013 hasRead = true;
3015 if (S.end.isDead()) {
3016 // Make sure that the corresponding machine operand for a "dead" live
3017 // range has the dead flag. We cannot perform this check for subregister
3018 // live ranges, as partially dead values are allowed.
3019 if (LaneMask.none() && !hasDeadDef) {
3020 report("Instruction ending live segment on dead slot has no dead flag",
3021 MI);
3022 report_context(LR, Reg, LaneMask);
3023 report_context(S);
3025 } else {
3026 if (!hasRead) {
3027 // When tracking subregister liveness, the main range must start new
3028 // values on partial register writes, even if there is no read.
3029 if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
3030 !hasSubRegDef) {
3031 report("Instruction ending live segment doesn't read the register",
3032 MI);
3033 report_context(LR, Reg, LaneMask);
3034 report_context(S);
3040 // Now check all the basic blocks in this live segment.
3041 MachineFunction::const_iterator MFI = MBB->getIterator();
3042 // Is this live segment the beginning of a non-PHIDef VN?
3043 if (S.start == VNI->def && !VNI->isPHIDef()) {
3044 // Not live-in to any blocks.
3045 if (MBB == EndMBB)
3046 return;
3047 // Skip this block.
3048 ++MFI;
3051 SmallVector<SlotIndex, 4> Undefs;
3052 if (LaneMask.any()) {
3053 LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
3054 OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
3057 while (true) {
3058 assert(LiveInts->isLiveInToMBB(LR, &*MFI));
3059 // We don't know how to track physregs into a landing pad.
3060 if (!Register::isVirtualRegister(Reg) && MFI->isEHPad()) {
3061 if (&*MFI == EndMBB)
3062 break;
3063 ++MFI;
3064 continue;
3067 // Is VNI a PHI-def in the current block?
3068 bool IsPHI = VNI->isPHIDef() &&
3069 VNI->def == LiveInts->getMBBStartIdx(&*MFI);
3071 // Check that VNI is live-out of all predecessors.
3072 for (const MachineBasicBlock *Pred : MFI->predecessors()) {
3073 SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
3074 // For a landing-pad block, the predecessor's live-out point is its last call.
3075 if (MFI->isEHPad()) {
3076 for (const MachineInstr &MI : llvm::reverse(*Pred)) {
3077 if (MI.isCall()) {
3078 PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
3079 break;
3083 const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);
3085 // All predecessors must have a live-out value. However for a phi
3086 // instruction with subregister intervals
3087 // only one of the subregisters (not necessarily the current one) needs to
3088 // be defined.
3089 if (!PVNI && (LaneMask.none() || !IsPHI)) {
3090 if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
3091 continue;
3092 report("Register not marked live out of predecessor", Pred);
3093 report_context(LR, Reg, LaneMask);
3094 report_context(*VNI);
3095 errs() << " live into " << printMBBReference(*MFI) << '@'
3096 << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
3097 << PEnd << '\n';
3098 continue;
3101 // Only PHI-defs can take different predecessor values.
3102 if (!IsPHI && PVNI != VNI) {
3103 report("Different value live out of predecessor", Pred);
3104 report_context(LR, Reg, LaneMask);
3105 errs() << "Valno #" << PVNI->id << " live out of "
3106 << printMBBReference(*Pred) << '@' << PEnd << "\nValno #"
3107 << VNI->id << " live into " << printMBBReference(*MFI) << '@'
3108 << LiveInts->getMBBStartIdx(&*MFI) << '\n';
3111 if (&*MFI == EndMBB)
3112 break;
3113 ++MFI;
3117 void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
3118 LaneBitmask LaneMask) {
3119 for (const VNInfo *VNI : LR.valnos)
3120 verifyLiveRangeValue(LR, VNI, Reg, LaneMask);
3122 for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
3123 verifyLiveRangeSegment(LR, I, Reg, LaneMask);
3126 void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
3127 Register Reg = LI.reg();
3128 assert(Register::isVirtualRegister(Reg));
3129 verifyLiveRange(LI, Reg);
3131 LaneBitmask Mask;
3132 LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
3133 for (const LiveInterval::SubRange &SR : LI.subranges()) {
3134 if ((Mask & SR.LaneMask).any()) {
3135 report("Lane masks of sub ranges overlap in live interval", MF);
3136 report_context(LI);
3138 if ((SR.LaneMask & ~MaxMask).any()) {
3139 report("Subrange lanemask is invalid", MF);
3140 report_context(LI);
3142 if (SR.empty()) {
3143 report("Subrange must not be empty", MF);
3144 report_context(SR, LI.reg(), SR.LaneMask);
3146 Mask |= SR.LaneMask;
3147 verifyLiveRange(SR, LI.reg(), SR.LaneMask);
3148 if (!LI.covers(SR)) {
3149 report("A Subrange is not covered by the main range", MF);
3150 report_context(LI);
3154 // Check that the LI only has one connected component.
3155 ConnectedVNInfoEqClasses ConEQ(*LiveInts);
3156 unsigned NumComp = ConEQ.Classify(LI);
3157 if (NumComp > 1) {
3158 report("Multiple connected components in live interval", MF);
3159 report_context(LI);
3160 for (unsigned comp = 0; comp != NumComp; ++comp) {
3161 errs() << comp << ": valnos";
3162 for (const VNInfo *I : LI.valnos)
3163 if (comp == ConEQ.getEqClass(I))
3164 errs() << ' ' << I->id;
3165 errs() << '\n';
3170 namespace {
3172 // FrameSetup and FrameDestroy can have zero adjustment, so a single integer
3173 // alone cannot tell us whether a zero value came from a FrameSetup or a
3174 // FrameDestroy.
3175 // We therefore use a bool plus an integer to capture the stack state.
3176 struct StackStateOfBB {
3177 StackStateOfBB() = default;
3178 StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup) :
3179 EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
3180 ExitIsSetup(ExitSetup) {}
3182 // Can be negative, which means we are setting up a frame.
3183 int EntryValue = 0;
3184 int ExitValue = 0;
3185 bool EntryIsSetup = false;
3186 bool ExitIsSetup = false;
3189 } // end anonymous namespace
3191 /// Make sure that on every path through the CFG a FrameSetup <n> is followed
3192 /// by a FrameDestroy <n>, stack adjustments are identical on all CFG edges to
3193 /// a merge point, and the frame is destroyed at the end of every return block.
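///
/// Editorial illustration (not from the original source): a block containing a
/// FrameSetup <16> ... FrameDestroy <16> pair takes the tracked state from
/// (ExitValue = 0, ExitIsSetup = false) to (-16, true) after the setup and back
/// to (0, false) after the destroy; every return block must end in that final
/// (0, false) state.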
3194 void MachineVerifier::verifyStackFrame() {
3195 unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
3196 unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
3197 if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
3198 return;
3200 SmallVector<StackStateOfBB, 8> SPState;
3201 SPState.resize(MF->getNumBlockIDs());
3202 df_iterator_default_set<const MachineBasicBlock*> Reachable;
3204 // Visit the MBBs in DFS order.
3205 for (df_ext_iterator<const MachineFunction *,
3206 df_iterator_default_set<const MachineBasicBlock *>>
3207 DFI = df_ext_begin(MF, Reachable), DFE = df_ext_end(MF, Reachable);
3208 DFI != DFE; ++DFI) {
3209 const MachineBasicBlock *MBB = *DFI;
3211 StackStateOfBB BBState;
3212 // Check the exit state of the DFS stack predecessor.
3213 if (DFI.getPathLength() >= 2) {
3214 const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
3215 assert(Reachable.count(StackPred) &&
3216 "DFS stack predecessor is already visited.\n");
3217 BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
3218 BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
3219 BBState.ExitValue = BBState.EntryValue;
3220 BBState.ExitIsSetup = BBState.EntryIsSetup;
3223 // Update stack state by checking contents of MBB.
3224 for (const auto &I : *MBB) {
3225 if (I.getOpcode() == FrameSetupOpcode) {
3226 if (BBState.ExitIsSetup)
3227 report("FrameSetup is after another FrameSetup", &I);
3228 BBState.ExitValue -= TII->getFrameTotalSize(I);
3229 BBState.ExitIsSetup = true;
3232 if (I.getOpcode() == FrameDestroyOpcode) {
3233 int Size = TII->getFrameTotalSize(I);
3234 if (!BBState.ExitIsSetup)
3235 report("FrameDestroy is not after a FrameSetup", &I);
3236 int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
3237 BBState.ExitValue;
3238 if (BBState.ExitIsSetup && AbsSPAdj != Size) {
3239 report("FrameDestroy <n> is after FrameSetup <m>", &I);
3240 errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
3241 << AbsSPAdj << ">.\n";
3243 BBState.ExitValue += Size;
3244 BBState.ExitIsSetup = false;
3247 SPState[MBB->getNumber()] = BBState;
3249 // Make sure the exit state of any predecessor is consistent with the entry
3250 // state.
3251 for (const MachineBasicBlock *Pred : MBB->predecessors()) {
3252 if (Reachable.count(Pred) &&
3253 (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
3254 SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
3255 report("The exit stack state of a predecessor is inconsistent.", MBB);
3256 errs() << "Predecessor " << printMBBReference(*Pred)
3257 << " has exit state (" << SPState[Pred->getNumber()].ExitValue
3258 << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while "
3259 << printMBBReference(*MBB) << " has entry state ("
3260 << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
3264 // Make sure the entry state of any successor is consistent with the exit
3265 // state.
3266 for (const MachineBasicBlock *Succ : MBB->successors()) {
3267 if (Reachable.count(Succ) &&
3268 (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
3269 SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
3270 report("The entry stack state of a successor is inconsistent.", MBB);
3271 errs() << "Successor " << printMBBReference(*Succ)
3272 << " has entry state (" << SPState[Succ->getNumber()].EntryValue
3273 << ", " << SPState[Succ->getNumber()].EntryIsSetup << "), while "
3274 << printMBBReference(*MBB) << " has exit state ("
3275 << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
3279 // Make sure a basic block with return ends with zero stack adjustment.
3280 if (!MBB->empty() && MBB->back().isReturn()) {
3281 if (BBState.ExitIsSetup)
3282 report("A return block ends with a FrameSetup.", MBB);
3283 if (BBState.ExitValue)
3284 report("A return block ends with a nonzero stack adjustment.", MBB);