//===- MachineVerifier.cpp - Machine Code Verifier ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Pass to verify generated machine code. The following is checked:
//
// Operand counts: All explicit operands must be present.
//
// Register classes: All physical and virtual register operands must be
// compatible with the register class required by the instruction descriptor.
//
// Register live intervals: Registers must be defined only once, and must be
// defined before use.
//
// The machine code verifier is enabled with the command-line option
// -verify-machineinstrs.
//===----------------------------------------------------------------------===//
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveRangeCalc.h"
#include "llvm/CodeGen/LiveStacks.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBundle.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterBank.h"
#include "llvm/CodeGen/RegisterBankInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/EHPersonalities.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/LaneBitmask.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCTargetOptions.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ModRef.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;
namespace {

struct MachineVerifier {
  MachineVerifier(Pass *pass, const char *b) : PASS(pass), Banner(b) {}

  MachineVerifier(const char *b, LiveVariables *LiveVars,
                  LiveIntervals *LiveInts, LiveStacks *LiveStks,
                  SlotIndexes *Indexes)
      : Banner(b), LiveVars(LiveVars), LiveInts(LiveInts), LiveStks(LiveStks),
        Indexes(Indexes) {}

  unsigned verify(const MachineFunction &MF);
  Pass *const PASS = nullptr;
  const char *Banner;
  const MachineFunction *MF = nullptr;
  const TargetMachine *TM = nullptr;
  const TargetInstrInfo *TII = nullptr;
  const TargetRegisterInfo *TRI = nullptr;
  const MachineRegisterInfo *MRI = nullptr;
  const RegisterBankInfo *RBI = nullptr;

  unsigned foundErrors = 0;

  // Avoid querying the MachineFunctionProperties for each operand.
  bool isFunctionRegBankSelected = false;
  bool isFunctionSelected = false;
  bool isFunctionTracksDebugUserValues = false;
  using RegVector = SmallVector<Register, 16>;
  using RegMaskVector = SmallVector<const uint32_t *, 4>;
  using RegSet = DenseSet<Register>;
  using RegMap = DenseMap<Register, const MachineInstr *>;
  using BlockSet = SmallPtrSet<const MachineBasicBlock *, 8>;
  const MachineInstr *FirstNonPHI = nullptr;
  const MachineInstr *FirstTerminator = nullptr;
  BlockSet FunctionBlocks;

  BitVector regsReserved;
  RegSet regsLive;
  RegVector regsDefined, regsDead, regsKilled;
  RegMaskVector regMasks;

  SlotIndex lastIndex;
  // Add Reg and any sub-registers to RV
  void addRegWithSubRegs(RegVector &RV, Register Reg) {
    RV.push_back(Reg);
    if (Reg.isPhysical())
      append_range(RV, TRI->subregs(Reg.asMCReg()));
  }
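  // (Expanding sub-registers matters because a def or kill of a wide
  // register also covers its parts; e.g. on X86 a def of $rax covers
  // $eax, $ax, $ah and $al, and the liveness checks below track each one.)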
  struct BBInfo {
    BBInfo() = default;

    // Is this MBB reachable from the MF entry point?
    bool reachable = false;

    // Vregs that must be live in because they are used without being
    // defined. Map value is the user. vregsLiveIn doesn't include regs
    // that are only used by PHI nodes.
    RegMap vregsLiveIn;

    // Regs killed in MBB. They may be defined again, and will then be in both
    // regsKilled and regsLiveOut.
    RegSet regsKilled;

    // Regs defined in MBB and live out. Note that vregs passing through may
    // be live out without being mentioned here.
    RegSet regsLiveOut;

    // Vregs that pass through MBB untouched. This set is disjoint from
    // regsKilled and regsLiveOut.
    RegSet vregsPassed;

    // Vregs that must pass through MBB because they are needed by a successor
    // block. This set is disjoint from regsLiveOut.
    RegSet vregsRequired;

    // Set versions of block's predecessor and successor lists.
    BlockSet Preds, Succs;
    // Add register to vregsRequired if it belongs there. Return true if
    // anything changed.
    bool addRequired(Register Reg) {
      if (!Reg.isVirtual())
        return false;
      if (regsLiveOut.count(Reg))
        return false;
      return vregsRequired.insert(Reg).second;
    }
    // Same for a full set.
    bool addRequired(const RegSet &RS) {
      bool Changed = false;
      for (Register Reg : RS)
        Changed |= addRequired(Reg);
      return Changed;
    }
    // Same for a full map.
    bool addRequired(const RegMap &RM) {
      bool Changed = false;
      for (const auto &I : RM)
        Changed |= addRequired(I.first);
      return Changed;
    }
    // Live-out registers are either in regsLiveOut or vregsPassed.
    bool isLiveOut(Register Reg) const {
      return regsLiveOut.count(Reg) || vregsPassed.count(Reg);
    }
  };
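  // Taken together, the BBInfo sets drive two fixed-point passes over the
  // CFG: calcRegsPassed() propagates liveness forward (a vreg passes
  // through a block if it reaches the exit of some predecessor and the
  // block neither kills nor defines it), while calcRegsRequired()
  // propagates demand backward from uses. A rough sketch of the forward
  // equation:
  //   vregsPassed(MBB) = union over preds P of
  //       (regsLiveOut(P) | vregsPassed(P))  minus regs killed/defined in MBB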
  // Extra register info per MBB.
  DenseMap<const MachineBasicBlock *, BBInfo> MBBInfoMap;
  bool isReserved(Register Reg) {
    return Reg.id() < regsReserved.size() && regsReserved.test(Reg.id());
  }
  bool isAllocatable(Register Reg) const {
    return Reg.id() < TRI->getNumRegs() && TRI->isInAllocatableClass(Reg) &&
           !regsReserved.test(Reg.id());
  }
  // Analysis information if available
  LiveVariables *LiveVars = nullptr;
  LiveIntervals *LiveInts = nullptr;
  LiveStacks *LiveStks = nullptr;
  SlotIndexes *Indexes = nullptr;
  void visitMachineFunctionBefore();
  void visitMachineBasicBlockBefore(const MachineBasicBlock *MBB);
  void visitMachineBundleBefore(const MachineInstr *MI);

  /// Verify that all of \p MI's virtual register operands are scalars.
  /// \returns True if all virtual register operands are scalar. False
  /// otherwise.
  bool verifyAllRegOpsScalar(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI);
  bool verifyVectorElementMatch(LLT Ty0, LLT Ty1, const MachineInstr *MI);

  bool verifyGIntrinsicSideEffects(const MachineInstr *MI);
  bool verifyGIntrinsicConvergence(const MachineInstr *MI);
  void verifyPreISelGenericInstruction(const MachineInstr *MI);

  void visitMachineInstrBefore(const MachineInstr *MI);
  void visitMachineOperand(const MachineOperand *MO, unsigned MONum);
  void visitMachineBundleAfter(const MachineInstr *MI);
  void visitMachineBasicBlockAfter(const MachineBasicBlock *MBB);
  void visitMachineFunctionAfter();
  void report(const char *msg, const MachineFunction *MF);
  void report(const char *msg, const MachineBasicBlock *MBB);
  void report(const char *msg, const MachineInstr *MI);
  void report(const char *msg, const MachineOperand *MO, unsigned MONum,
              LLT MOVRegType = LLT{});
  void report(const Twine &Msg, const MachineInstr *MI);
  void report_context(const LiveInterval &LI) const;
  void report_context(const LiveRange &LR, Register VRegUnit,
                      LaneBitmask LaneMask) const;
  void report_context(const LiveRange::Segment &S) const;
  void report_context(const VNInfo &VNI) const;
  void report_context(SlotIndex Pos) const;
  void report_context(MCPhysReg PhysReg) const;
  void report_context_liverange(const LiveRange &LR) const;
  void report_context_lanemask(LaneBitmask LaneMask) const;
  void report_context_vreg(Register VReg) const;
  void report_context_vreg_regunit(Register VRegOrUnit) const;
  void verifyInlineAsm(const MachineInstr *MI);

  void checkLiveness(const MachineOperand *MO, unsigned MONum);
  void checkLivenessAtUse(const MachineOperand *MO, unsigned MONum,
                          SlotIndex UseIdx, const LiveRange &LR,
                          Register VRegOrUnit,
                          LaneBitmask LaneMask = LaneBitmask::getNone());
  void checkLivenessAtDef(const MachineOperand *MO, unsigned MONum,
                          SlotIndex DefIdx, const LiveRange &LR,
                          Register VRegOrUnit, bool SubRangeCheck = false,
                          LaneBitmask LaneMask = LaneBitmask::getNone());
  void markReachable(const MachineBasicBlock *MBB);
  void calcRegsPassed();
  void checkPHIOps(const MachineBasicBlock &MBB);

  void calcRegsRequired();
  void verifyLiveVariables();
  void verifyLiveIntervals();
  void verifyLiveInterval(const LiveInterval &);
  void verifyLiveRangeValue(const LiveRange &, const VNInfo *, Register,
                            LaneBitmask);
  void verifyLiveRangeSegment(const LiveRange &,
                              const LiveRange::const_iterator I, Register,
                              LaneBitmask);
  void verifyLiveRange(const LiveRange &, Register,
                       LaneBitmask LaneMask = LaneBitmask::getNone());

  void verifyStackFrame();

  void verifySlotIndexes() const;
  void verifyProperties(const MachineFunction &MF);
};
struct MachineVerifierPass : public MachineFunctionPass {
  static char ID; // Pass ID, replacement for typeid

  const std::string Banner;

  MachineVerifierPass(std::string banner = std::string())
      : MachineFunctionPass(ID), Banner(std::move(banner)) {
    initializeMachineVerifierPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addUsedIfAvailable<LiveStacks>();
    AU.addUsedIfAvailable<LiveVariables>();
    AU.addUsedIfAvailable<SlotIndexes>();
    AU.addUsedIfAvailable<LiveIntervals>();
    AU.setPreservesAll();
    MachineFunctionPass::getAnalysisUsage(AU);
  }
  bool runOnMachineFunction(MachineFunction &MF) override {
    // Skip functions that have known verification problems.
    // FIXME: Remove this mechanism when all problematic passes have been
    // fixed.
    if (MF.getProperties().hasProperty(
            MachineFunctionProperties::Property::FailsVerification))
      return false;

    unsigned FoundErrors = MachineVerifier(this, Banner.c_str()).verify(MF);
    if (FoundErrors)
      report_fatal_error("Found " + Twine(FoundErrors) +
                         " machine code errors.");
    return false;
  }
};

} // end anonymous namespace
char MachineVerifierPass::ID = 0;

INITIALIZE_PASS(MachineVerifierPass, "machineverifier",
                "Verify generated machine code", false, false)

FunctionPass *llvm::createMachineVerifierPass(const std::string &Banner) {
  return new MachineVerifierPass(Banner);
}
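// Typical use (illustrative only): a target or test harness can schedule the
// verifier after a pass it wants to check, e.g. with the legacy pass manager:
//   PM.add(createMachineVerifierPass("After My Pass"));
// or for the whole pipeline from the command line:
//   llc -verify-machineinstrs ...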
void llvm::verifyMachineFunction(MachineFunctionAnalysisManager *,
                                 const std::string &Banner,
                                 const MachineFunction &MF) {
  // TODO: Use MFAM after porting below analyses.
  // LiveVariables *LiveVars;
  // LiveIntervals *LiveInts;
  // LiveStacks *LiveStks;
  // SlotIndexes *Indexes;
  unsigned FoundErrors = MachineVerifier(nullptr, Banner.c_str()).verify(MF);
  if (FoundErrors)
    report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
}
bool MachineFunction::verify(Pass *p, const char *Banner,
                             bool AbortOnErrors) const {
  MachineFunction &MF = const_cast<MachineFunction &>(*this);
  unsigned FoundErrors = MachineVerifier(p, Banner).verify(MF);
  if (AbortOnErrors && FoundErrors)
    report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
  return FoundErrors == 0;
}
bool MachineFunction::verify(LiveIntervals *LiveInts, SlotIndexes *Indexes,
                             const char *Banner, bool AbortOnErrors) const {
  MachineFunction &MF = const_cast<MachineFunction &>(*this);
  unsigned FoundErrors =
      MachineVerifier(Banner, nullptr, LiveInts, nullptr, Indexes).verify(MF);
  if (AbortOnErrors && FoundErrors)
    report_fatal_error("Found " + Twine(FoundErrors) + " machine code errors.");
  return FoundErrors == 0;
}
void MachineVerifier::verifySlotIndexes() const {
  if (Indexes == nullptr)
    return;

  // Ensure the IdxMBB list is sorted by slot indexes.
  SlotIndex Last;
  for (SlotIndexes::MBBIndexIterator I = Indexes->MBBIndexBegin(),
                                     E = Indexes->MBBIndexEnd();
       I != E; ++I) {
    assert(!Last.isValid() || I->first > Last);
    Last = I->first;
  }
}
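// SlotIndexes numbers blocks and instructions in layout order, so a sorted
// IdxMBB list is what lets per-block index ranges (printed like [16r;48B))
// be compared with a plain '<'. An out-of-order entry here usually means a
// pass moved or created a block without updating the indexes.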
void MachineVerifier::verifyProperties(const MachineFunction &MF) {
  // If a pass has introduced virtual registers without clearing the
  // NoVRegs property (or set it without allocating the vregs)
  // then report an error.
  if (MF.getProperties().hasProperty(
          MachineFunctionProperties::Property::NoVRegs) &&
      MRI->getNumVirtRegs())
    report("Function has NoVRegs property but there are VReg operands", &MF);
}
unsigned MachineVerifier::verify(const MachineFunction &MF) {
  foundErrors = 0;

  this->MF = &MF;
  TM = &MF.getTarget();
  TII = MF.getSubtarget().getInstrInfo();
  TRI = MF.getSubtarget().getRegisterInfo();
  RBI = MF.getSubtarget().getRegBankInfo();
  MRI = &MF.getRegInfo();

  const bool isFunctionFailedISel = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::FailedISel);

  // If we're mid-GlobalISel and we already triggered the fallback path then
  // it's expected that the MIR is somewhat broken but that's ok since we'll
  // reset it and clear the FailedISel attribute in ResetMachineFunctions.
  if (isFunctionFailedISel)
    return foundErrors;

  isFunctionRegBankSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::RegBankSelected);
  isFunctionSelected = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::Selected);
  isFunctionTracksDebugUserValues = MF.getProperties().hasProperty(
      MachineFunctionProperties::Property::TracksDebugUserValues);

  if (PASS) {
    LiveInts = PASS->getAnalysisIfAvailable<LiveIntervals>();
    // We don't want to verify LiveVariables if LiveIntervals is available.
    if (!LiveInts)
      LiveVars = PASS->getAnalysisIfAvailable<LiveVariables>();
    LiveStks = PASS->getAnalysisIfAvailable<LiveStacks>();
    Indexes = PASS->getAnalysisIfAvailable<SlotIndexes>();
  }

  verifySlotIndexes();

  verifyProperties(MF);
  visitMachineFunctionBefore();
  for (const MachineBasicBlock &MBB : MF) {
    visitMachineBasicBlockBefore(&MBB);
    // Keep track of the current bundle header.
    const MachineInstr *CurBundle = nullptr;
    // Do we expect the next instruction to be part of the same bundle?
    bool InBundle = false;

    for (const MachineInstr &MI : MBB.instrs()) {
      if (MI.getParent() != &MBB) {
        report("Bad instruction parent pointer", &MBB);
        errs() << "Instruction: " << MI;
        continue;
      }

      // Check for consistent bundle flags.
      if (InBundle && !MI.isBundledWithPred())
        report("Missing BundledPred flag, "
               "BundledSucc was set on predecessor",
               &MI);
      if (!InBundle && MI.isBundledWithPred())
        report("BundledPred flag is set, "
               "but BundledSucc not set on predecessor",
               &MI);

      // Is this a bundle header?
      if (!MI.isInsideBundle()) {
        if (CurBundle)
          visitMachineBundleAfter(CurBundle);
        CurBundle = &MI;
        visitMachineBundleBefore(CurBundle);
      } else if (!CurBundle)
        report("No bundle header", &MI);
      visitMachineInstrBefore(&MI);
      for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
        const MachineOperand &Op = MI.getOperand(I);
        if (Op.getParent() != &MI) {
          // Make sure to use correct addOperand / removeOperand / ChangeTo
          // functions when replacing operands of a MachineInstr.
          report("Instruction has operand with wrong parent set", &MI);
        }

        visitMachineOperand(&Op, I);
      }

      // Was this the last bundled instruction?
      InBundle = MI.isBundledWithSucc();
    }
    if (CurBundle)
      visitMachineBundleAfter(CurBundle);
    if (InBundle)
      report("BundledSucc flag set on last instruction in block", &MBB.back());
    visitMachineBasicBlockAfter(&MBB);
  }
  visitMachineFunctionAfter();

  // Clean up.
  regsLive.clear();
  regsDefined.clear();
  regsDead.clear();
  regsKilled.clear();
  regMasks.clear();
  MBBInfoMap.clear();

  return foundErrors;
}
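// The walk above fires the visitors in a fixed order:
//   visitMachineFunctionBefore()
//     visitMachineBasicBlockBefore(MBB)
//       visitMachineBundleBefore(MI) / visitMachineInstrBefore(MI)
//         visitMachineOperand(MO, MONum)
//       visitMachineBundleAfter(MI)
//     visitMachineBasicBlockAfter(MBB)
//   visitMachineFunctionAfter()
// so any per-function or per-block state set up in a *Before hook is always
// in place by the time individual operands are checked.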
void MachineVerifier::report(const char *msg, const MachineFunction *MF) {
  assert(MF);
  errs() << '\n';
  if (!foundErrors++) {
    if (Banner)
      errs() << "# " << Banner << '\n';
    if (LiveInts != nullptr)
      LiveInts->print(errs());
    else
      MF->print(errs(), Indexes);
  }
  errs() << "*** Bad machine code: " << msg << " ***\n"
         << "- function: " << MF->getName() << "\n";
}
void MachineVerifier::report(const char *msg, const MachineBasicBlock *MBB) {
  assert(MBB);
  report(msg, MBB->getParent());
  errs() << "- basic block: " << printMBBReference(*MBB) << ' '
         << MBB->getName() << " (" << (const void *)MBB << ')';
  if (Indexes)
    errs() << " [" << Indexes->getMBBStartIdx(MBB) << ';'
           << Indexes->getMBBEndIdx(MBB) << ')';
  errs() << '\n';
}
void MachineVerifier::report(const char *msg, const MachineInstr *MI) {
  assert(MI);
  report(msg, MI->getParent());
  errs() << "- instruction: ";
  if (Indexes && Indexes->hasIndex(*MI))
    errs() << Indexes->getInstructionIndex(*MI) << '\t';
  MI->print(errs(), /*IsStandalone=*/true);
}
void MachineVerifier::report(const char *msg, const MachineOperand *MO,
                             unsigned MONum, LLT MOVRegType) {
  assert(MO);
  report(msg, MO->getParent());
  errs() << "- operand " << MONum << ": ";
  MO->print(errs(), MOVRegType, TRI);
  errs() << "\n";
}
void MachineVerifier::report(const Twine &Msg, const MachineInstr *MI) {
  report(Msg.str().c_str(), MI);
}
void MachineVerifier::report_context(SlotIndex Pos) const {
  errs() << "- at: " << Pos << '\n';
}

void MachineVerifier::report_context(const LiveInterval &LI) const {
  errs() << "- interval: " << LI << '\n';
}

void MachineVerifier::report_context(const LiveRange &LR, Register VRegUnit,
                                     LaneBitmask LaneMask) const {
  report_context_liverange(LR);
  report_context_vreg_regunit(VRegUnit);
  if (LaneMask.any())
    report_context_lanemask(LaneMask);
}

void MachineVerifier::report_context(const LiveRange::Segment &S) const {
  errs() << "- segment: " << S << '\n';
}

void MachineVerifier::report_context(const VNInfo &VNI) const {
  errs() << "- ValNo: " << VNI.id << " (def " << VNI.def << ")\n";
}

void MachineVerifier::report_context_liverange(const LiveRange &LR) const {
  errs() << "- liverange: " << LR << '\n';
}

void MachineVerifier::report_context(MCPhysReg PReg) const {
  errs() << "- p. register: " << printReg(PReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg(Register VReg) const {
  errs() << "- v. register: " << printReg(VReg, TRI) << '\n';
}

void MachineVerifier::report_context_vreg_regunit(Register VRegOrUnit) const {
  if (VRegOrUnit.isVirtual()) {
    report_context_vreg(VRegOrUnit);
  } else {
    errs() << "- regunit: " << printRegUnit(VRegOrUnit, TRI) << '\n';
  }
}

void MachineVerifier::report_context_lanemask(LaneBitmask LaneMask) const {
  errs() << "- lanemask: " << PrintLaneMask(LaneMask) << '\n';
}
void MachineVerifier::markReachable(const MachineBasicBlock *MBB) {
  BBInfo &MInfo = MBBInfoMap[MBB];
  if (!MInfo.reachable) {
    MInfo.reachable = true;
    for (const MachineBasicBlock *Succ : MBB->successors())
      markReachable(Succ);
  }
}
void MachineVerifier::visitMachineFunctionBefore() {
  lastIndex = SlotIndex();
  regsReserved = MRI->reservedRegsFrozen() ? MRI->getReservedRegs()
                                           : TRI->getReservedRegs(*MF);

  if (!MF->empty())
    markReachable(&MF->front());

  // Build a set of the basic blocks in the function.
  FunctionBlocks.clear();
  for (const auto &MBB : *MF) {
    FunctionBlocks.insert(&MBB);
    BBInfo &MInfo = MBBInfoMap[&MBB];

    MInfo.Preds.insert(MBB.pred_begin(), MBB.pred_end());
    if (MInfo.Preds.size() != MBB.pred_size())
      report("MBB has duplicate entries in its predecessor list.", &MBB);

    MInfo.Succs.insert(MBB.succ_begin(), MBB.succ_end());
    if (MInfo.Succs.size() != MBB.succ_size())
      report("MBB has duplicate entries in its successor list.", &MBB);
  }

  // Check that the register use lists are sane.
  MRI->verifyUseLists();

  if (!MF->empty())
    verifyStackFrame();
}
void
MachineVerifier::visitMachineBasicBlockBefore(const MachineBasicBlock *MBB) {
  FirstTerminator = nullptr;
  FirstNonPHI = nullptr;

  if (!MF->getProperties().hasProperty(
          MachineFunctionProperties::Property::NoPHIs) &&
      MRI->tracksLiveness()) {
    // If this block has allocatable physical registers live-in, check that
    // it is an entry block or landing pad.
    for (const auto &LI : MBB->liveins()) {
      if (isAllocatable(LI.PhysReg) && !MBB->isEHPad() &&
          MBB->getIterator() != MBB->getParent()->begin() &&
          !MBB->isInlineAsmBrIndirectTarget()) {
        report("MBB has allocatable live-in, but isn't entry, landing-pad, or "
               "inlineasm-br-indirect-target.",
               MBB);
        report_context(LI.PhysReg);
      }
    }
  }
  if (MBB->isIRBlockAddressTaken()) {
    if (!MBB->getAddressTakenIRBlock()->hasAddressTaken())
      report("ir-block-address-taken is associated with basic block not used "
             "by a blockaddress.",
             MBB);
  }

  // Count the number of landing pad successors.
  SmallPtrSet<const MachineBasicBlock *, 4> LandingPadSuccs;
  for (const auto *succ : MBB->successors()) {
    if (succ->isEHPad())
      LandingPadSuccs.insert(succ);
    if (!FunctionBlocks.count(succ))
      report("MBB has successor that isn't part of the function.", MBB);
    if (!MBBInfoMap[succ].Preds.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the predecessor list of the successor "
             << printMBBReference(*succ) << ".\n";
    }
  }
  // Check the predecessor list.
  for (const MachineBasicBlock *Pred : MBB->predecessors()) {
    if (!FunctionBlocks.count(Pred))
      report("MBB has predecessor that isn't part of the function.", MBB);
    if (!MBBInfoMap[Pred].Succs.count(MBB)) {
      report("Inconsistent CFG", MBB);
      errs() << "MBB is not in the successor list of the predecessor "
             << printMBBReference(*Pred) << ".\n";
    }
  }
  const MCAsmInfo *AsmInfo = TM->getMCAsmInfo();
  const BasicBlock *BB = MBB->getBasicBlock();
  const Function &F = MF->getFunction();
  if (LandingPadSuccs.size() > 1 &&
      !(AsmInfo &&
        AsmInfo->getExceptionHandlingType() == ExceptionHandling::SjLj &&
        BB && isa<SwitchInst>(BB->getTerminator())) &&
      !isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    report("MBB has more than one landing pad successor", MBB);
  // Call analyzeBranch. If it succeeds, there are several more conditions to
  // check.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (!TII->analyzeBranch(*const_cast<MachineBasicBlock *>(MBB), TBB, FBB,
                          Cond)) {
    // Ok, analyzeBranch thinks it knows what's going on with this block. Let's
    // check whether its answers match up with reality.
    if (!TBB && !FBB) {
      // Block falls through to its successor.
      if (!MBB->empty() && MBB->back().isBarrier() &&
          !TII->isPredicated(MBB->back())) {
        report("MBB exits via unconditional fall-through but ends with a "
               "barrier instruction!", MBB);
      }
      if (!Cond.empty()) {
        report("MBB exits via unconditional fall-through but has a condition!",
               MBB);
      }
    } else if (TBB && !FBB && Cond.empty()) {
      // Block unconditionally branches somewhere.
      if (MBB->empty()) {
        report("MBB exits via unconditional branch but doesn't contain "
               "any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via unconditional branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via unconditional branch but the branch isn't a "
               "terminator instruction!", MBB);
      }
    } else if (TBB && !FBB && !Cond.empty()) {
      // Block conditionally branches somewhere, otherwise falls through.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/fall-through but doesn't "
               "contain any instructions!", MBB);
      } else if (MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/fall-through but ends with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/fall-through but the branch "
               "isn't a terminator instruction!", MBB);
      }
    } else if (TBB && FBB) {
      // Block conditionally branches somewhere, otherwise branches
      // somewhere else.
      if (MBB->empty()) {
        report("MBB exits via conditional branch/branch but doesn't "
               "contain any instructions!", MBB);
      } else if (!MBB->back().isBarrier()) {
        report("MBB exits via conditional branch/branch but doesn't end with a "
               "barrier instruction!", MBB);
      } else if (!MBB->back().isTerminator()) {
        report("MBB exits via conditional branch/branch but the branch "
               "isn't a terminator instruction!", MBB);
      }
      if (Cond.empty()) {
        report("MBB exits via conditional branch/branch but there's no "
               "condition!", MBB);
      }
    } else {
      report("analyzeBranch returned invalid data!", MBB);
    }
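    // Summary of the shapes accepted above, in terms of analyzeBranch
    // results:
    //   TBB   FBB   Cond      block must end in...
    //   null  null  empty     fall-through (no barrier, no condition)
    //   set   null  empty     unconditional branch (barrier terminator)
    //   set   null  nonempty  conditional branch + fall-through
    //   set   set   nonempty  conditional branch + unconditional branch
    // Any other combination is reported as invalid analyzeBranch data.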
    // Now check that the successors match up with the answers reported by
    // analyzeBranch.
    if (TBB && !MBB->isSuccessor(TBB))
      report("MBB exits via jump or conditional branch, but its target isn't "
             "a CFG successor!",
             MBB);
    if (FBB && !MBB->isSuccessor(FBB))
      report("MBB exits via conditional branch, but its target isn't a CFG "
             "successor!",
             MBB);

    // There might be a fallthrough to the next block if there's either no
    // unconditional true branch, or if there's a condition, and one of the
    // branches is missing.
    bool Fallthrough = !TBB || (!Cond.empty() && !FBB);

    // A conditional fallthrough must be an actual CFG successor, not
    // unreachable. (Conversely, an unconditional fallthrough might not really
    // be a successor, because the block might end in unreachable.)
    if (!Cond.empty() && !FBB) {
      MachineFunction::const_iterator MBBI = std::next(MBB->getIterator());
      if (MBBI == MF->end()) {
        report("MBB conditionally falls through out of function!", MBB);
      } else if (!MBB->isSuccessor(&*MBBI))
        report("MBB exits via conditional branch/fall-through but the CFG "
               "successors don't match the actual successors!",
               MBB);
    }
    // Verify that there aren't any extra un-accounted-for successors.
    for (const MachineBasicBlock *SuccMBB : MBB->successors()) {
      // If this successor is one of the branch targets, it's okay.
      if (SuccMBB == TBB || SuccMBB == FBB)
        continue;
      // If we might have a fallthrough, and the successor is the fallthrough
      // block, that's also ok.
      if (Fallthrough && SuccMBB == MBB->getNextNode())
        continue;
      // Also accept successors which are for exception-handling or might be
      // inlineasm_br targets.
      if (SuccMBB->isEHPad() || SuccMBB->isInlineAsmBrIndirectTarget())
        continue;
      report("MBB has unexpected successors which are not branch targets, "
             "fallthrough, EHPads, or inlineasm_br targets.",
             MBB);
    }
  }
  regsLive.clear();
  if (MRI->tracksLiveness()) {
    for (const auto &LI : MBB->liveins()) {
      if (!Register::isPhysicalRegister(LI.PhysReg)) {
        report("MBB live-in list contains non-physical register", MBB);
        continue;
      }
      for (const MCPhysReg &SubReg : TRI->subregs_inclusive(LI.PhysReg))
        regsLive.insert(SubReg);
    }
  }

  const MachineFrameInfo &MFI = MF->getFrameInfo();
  BitVector PR = MFI.getPristineRegs(*MF);
  for (unsigned I : PR.set_bits()) {
    for (const MCPhysReg &SubReg : TRI->subregs_inclusive(I))
      regsLive.insert(SubReg);
  }

  regsKilled.clear();
  regsDefined.clear();

  if (Indexes)
    lastIndex = Indexes->getMBBStartIdx(MBB);
}
// This function gets called for all bundle headers, including normal
// stand-alone unbundled instructions.
void MachineVerifier::visitMachineBundleBefore(const MachineInstr *MI) {
  if (Indexes && Indexes->hasIndex(*MI)) {
    SlotIndex idx = Indexes->getInstructionIndex(*MI);
    if (!(idx > lastIndex)) {
      report("Instruction index out of order", MI);
      errs() << "Last instruction was at " << lastIndex << '\n';
    }
    lastIndex = idx;
  }

  // Ensure non-terminators don't follow terminators.
  if (MI->isTerminator()) {
    if (!FirstTerminator)
      FirstTerminator = MI;
  } else if (FirstTerminator) {
    // For GlobalISel, G_INVOKE_REGION_START is a terminator that we allow to
    // precede non-terminators.
    if (FirstTerminator->getOpcode() != TargetOpcode::G_INVOKE_REGION_START) {
      report("Non-terminator instruction after the first terminator", MI);
      errs() << "First terminator was:\t" << *FirstTerminator;
    }
  }
}
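// A well-formed bundle looks roughly like this in MIR:
//   BUNDLE implicit-def $r0, implicit $r1 {  ; header: !isInsideBundle()
//     $r0 = OP1 ...     ; isBundledWithPred() and isBundledWithSucc()
//     OP2 implicit $r0  ; isBundledWithPred() only (last instruction)
//   }
// The flag checks in verify() rely on this pairing: every instruction with
// isBundledWithSucc() set must be followed by one with isBundledWithPred().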
// The operands on an INLINEASM instruction must follow a template.
// Verify that the flag operands make sense.
void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) {
  // The first two operands on INLINEASM are the asm string and global flags.
  if (MI->getNumOperands() < 2) {
    report("Too few operands on inline asm", MI);
    return;
  }
  if (!MI->getOperand(0).isSymbol())
    report("Asm string must be an external symbol", MI);
  if (!MI->getOperand(1).isImm())
    report("Asm flags must be an immediate", MI);
  // Allowed flags are Extra_HasSideEffects = 1, Extra_IsAlignStack = 2,
  // Extra_AsmDialect = 4, Extra_MayLoad = 8, Extra_MayStore = 16,
  // and Extra_IsConvergent = 32.
  if (!isUInt<6>(MI->getOperand(1).getImm()))
    report("Unknown asm flags", &MI->getOperand(1), 1);

  static_assert(InlineAsm::MIOp_FirstOperand == 2, "Asm format changed");

  unsigned OpNo = InlineAsm::MIOp_FirstOperand;
  unsigned NumOps;
  for (unsigned e = MI->getNumOperands(); OpNo < e; OpNo += NumOps) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    // There may be implicit ops after the fixed operands.
    if (!MO.isImm())
      break;
    const InlineAsm::Flag F(MO.getImm());
    NumOps = 1 + F.getNumOperandRegisters();
  }

  if (OpNo > MI->getNumOperands())
    report("Missing operands in last group", MI);

  // An optional MDNode follows the groups.
  if (OpNo < MI->getNumOperands() && MI->getOperand(OpNo).isMetadata())
    ++OpNo;

  // All trailing operands must be implicit registers.
  for (unsigned e = MI->getNumOperands(); OpNo < e; ++OpNo) {
    const MachineOperand &MO = MI->getOperand(OpNo);
    if (!MO.isReg() || !MO.isImplicit())
      report("Expected implicit register after groups", &MO, OpNo);
  }

  if (MI->getOpcode() == TargetOpcode::INLINEASM_BR) {
    const MachineBasicBlock *MBB = MI->getParent();

    for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI->getNumOperands();
         i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);

      if (!MO.isMBB())
        continue;

      // Check the successor & predecessor lists look ok, assume they are
      // not. Find the indirect target without going through the successors.
      const MachineBasicBlock *IndirectTargetMBB = MO.getMBB();
      if (!IndirectTargetMBB) {
        report("INLINEASM_BR indirect target does not exist", &MO, i);
        break;
      }

      if (!MBB->isSuccessor(IndirectTargetMBB))
        report("INLINEASM_BR indirect target missing from successor list", &MO,
               i);

      if (!IndirectTargetMBB->isPredecessor(MBB))
        report("INLINEASM_BR indirect target predecessor list missing parent",
               &MO, i);
    }
  }
}
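// For reference, the operand layout this function walks is, schematically:
//   INLINEASM <es:asm string>, <imm:extra flags>,
//             <imm:group0 flag>, <group0 operands...>,
//             <imm:group1 flag>, <group1 operands...>, ...
//             [!srcloc metadata], [implicit register operands...]
// where each group flag's InlineAsm::Flag encodes the operand kind and how
// many register operands follow it.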
bool MachineVerifier::verifyAllRegOpsScalar(const MachineInstr &MI,
                                            const MachineRegisterInfo &MRI) {
  if (none_of(MI.explicit_operands(), [&MRI](const MachineOperand &Op) {
        if (!Op.isReg())
          return false;
        const auto Reg = Op.getReg();
        if (Reg.isPhysical())
          return false;
        return !MRI.getType(Reg).isScalar();
      }))
    return true;
  report("All register operands must have scalar types", &MI);
  return false;
}
/// Check that types are consistent when two operands need to have the same
/// number of vector elements.
/// \return true if the types are valid.
bool MachineVerifier::verifyVectorElementMatch(LLT Ty0, LLT Ty1,
                                               const MachineInstr *MI) {
  if (Ty0.isVector() != Ty1.isVector()) {
    report("operand types must be all-vector or all-scalar", MI);
    // Generally we try to report as many issues as possible at once, but in
    // this case it's not clear what we should be comparing the size of the
    // scalar with: the size of the whole vector or its lane. Instead of
    // making an arbitrary choice and emitting a not-so-helpful message, let's
    // avoid the extra noise and stop here.
    return false;
  }

  if (Ty0.isVector() && Ty0.getNumElements() != Ty1.getNumElements()) {
    report("operand types must preserve number of vector elements", MI);
    return false;
  }

  return true;
}
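// Examples of what this rejects: s32 vs <2 x s32> (mixed scalar/vector) and
// <2 x s32> vs <4 x s32> (element-count mismatch). A plain scalar size
// mismatch such as s32 vs s64 is left for the per-opcode size checks.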
bool MachineVerifier::verifyGIntrinsicSideEffects(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  bool NoSideEffects = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_CONVERGENT;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    AttributeList Attrs = Intrinsic::getAttributes(
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    bool DeclHasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
    if (NoSideEffects && DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode),
                   " used with intrinsic that accesses memory"),
             MI);
      return false;
    }
    if (!NoSideEffects && !DeclHasSideEffects) {
      report(Twine(TII->getName(Opcode), " used with readnone intrinsic"), MI);
      return false;
    }
  }

  return true;
}
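// E.g. a readnone intrinsic such as llvm.fabs must be built as plain
// G_INTRINSIC; selecting the _W_SIDE_EFFECTS form for it (or the plain form
// for an intrinsic that accesses memory) is flagged above.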
bool MachineVerifier::verifyGIntrinsicConvergence(const MachineInstr *MI) {
  auto Opcode = MI->getOpcode();
  bool NotConvergent = Opcode == TargetOpcode::G_INTRINSIC ||
                       Opcode == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  unsigned IntrID = cast<GIntrinsic>(MI)->getIntrinsicID();
  if (IntrID != 0 && IntrID < Intrinsic::num_intrinsics) {
    AttributeList Attrs = Intrinsic::getAttributes(
        MF->getFunction().getContext(), static_cast<Intrinsic::ID>(IntrID));
    bool DeclIsConvergent = Attrs.hasFnAttr(Attribute::Convergent);
    if (NotConvergent && DeclIsConvergent) {
      report(Twine(TII->getName(Opcode), " used with a convergent intrinsic"),
             MI);
      return false;
    }
    if (!NotConvergent && !DeclIsConvergent) {
      report(
          Twine(TII->getName(Opcode), " used with a non-convergent intrinsic"),
          MI);
      return false;
    }
  }

  return true;
}
*MI
) {
1026 if (isFunctionSelected
)
1027 report("Unexpected generic instruction in a Selected function", MI
);
1029 const MCInstrDesc
&MCID
= MI
->getDesc();
1030 unsigned NumOps
= MI
->getNumOperands();
1032 // Branches must reference a basic block if they are not indirect
1033 if (MI
->isBranch() && !MI
->isIndirectBranch()) {
1034 bool HasMBB
= false;
1035 for (const MachineOperand
&Op
: MI
->operands()) {
1043 report("Branch instruction is missing a basic block operand or "
1044 "isIndirectBranch property",
1050 SmallVector
<LLT
, 4> Types
;
1051 for (unsigned I
= 0, E
= std::min(MCID
.getNumOperands(), NumOps
);
1053 if (!MCID
.operands()[I
].isGenericType())
1055 // Generic instructions specify type equality constraints between some of
1056 // their operands. Make sure these are consistent.
1057 size_t TypeIdx
= MCID
.operands()[I
].getGenericTypeIndex();
1058 Types
.resize(std::max(TypeIdx
+ 1, Types
.size()));
1060 const MachineOperand
*MO
= &MI
->getOperand(I
);
1062 report("generic instruction must use register operands", MI
);
1066 LLT OpTy
= MRI
->getType(MO
->getReg());
1067 // Don't report a type mismatch if there is no actual mismatch, only a
1068 // type missing, to reduce noise:
1069 if (OpTy
.isValid()) {
1070 // Only the first valid type for a type index will be printed: don't
1071 // overwrite it later so it's always clear which type was expected:
1072 if (!Types
[TypeIdx
].isValid())
1073 Types
[TypeIdx
] = OpTy
;
1074 else if (Types
[TypeIdx
] != OpTy
)
1075 report("Type mismatch in generic instruction", MO
, I
, OpTy
);
1077 // Generic instructions must have types attached to their operands.
1078 report("Generic instruction is missing a virtual register type", MO
, I
);
1082 // Generic opcodes must not have physical register operands.
1083 for (unsigned I
= 0; I
< MI
->getNumOperands(); ++I
) {
1084 const MachineOperand
*MO
= &MI
->getOperand(I
);
1085 if (MO
->isReg() && MO
->getReg().isPhysical())
1086 report("Generic instruction cannot have physical register", MO
, I
);
1089 // Avoid out of bounds in checks below. This was already reported earlier.
1090 if (MI
->getNumOperands() < MCID
.getNumOperands())
1093 StringRef ErrorInfo
;
1094 if (!TII
->verifyInstruction(*MI
, ErrorInfo
))
1095 report(ErrorInfo
.data(), MI
);
1097 // Verify properties of various specific instruction types
1098 unsigned Opc
= MI
->getOpcode();
  case TargetOpcode::G_ASSERT_SEXT:
  case TargetOpcode::G_ASSERT_ZEXT: {
    std::string OpcName =
        Opc == TargetOpcode::G_ASSERT_ZEXT ? "G_ASSERT_ZEXT" : "G_ASSERT_SEXT";
    if (!MI->getOperand(2).isImm()) {
      report(Twine(OpcName, " expects an immediate operand #2"), MI);
      break;
    }

    Register Dst = MI->getOperand(0).getReg();
    Register Src = MI->getOperand(1).getReg();
    LLT SrcTy = MRI->getType(Src);
    int64_t Imm = MI->getOperand(2).getImm();
    if (Imm <= 0) {
      report(Twine(OpcName, " size must be >= 1"), MI);
      break;
    }

    if (Imm >= SrcTy.getScalarSizeInBits()) {
      report(Twine(OpcName, " size must be less than source bit width"), MI);
      break;
    }

    const RegisterBank *SrcRB = RBI->getRegBank(Src, *MRI, *TRI);
    const RegisterBank *DstRB = RBI->getRegBank(Dst, *MRI, *TRI);

    // Allow only the source bank to be set.
    if ((SrcRB && DstRB && SrcRB != DstRB) || (DstRB && !SrcRB)) {
      report(Twine(OpcName, " cannot change register bank"), MI);
      break;
    }

    // Don't allow a class change. Do allow member class->regbank.
    const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(Dst);
    if (DstRC && DstRC != MRI->getRegClassOrNull(Src)) {
      report(
          Twine(OpcName, " source and destination register classes must match"),
          MI);
      break;
    }

    break;
  }
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (DstTy.isVector())
      report("Instruction cannot use a vector result type", MI);

    if (MI->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (!MI->getOperand(1).isCImm()) {
        report("G_CONSTANT operand must be cimm", MI);
        break;
      }

      const ConstantInt *CI = MI->getOperand(1).getCImm();
      if (CI->getBitWidth() != DstTy.getSizeInBits())
        report("inconsistent constant size", MI);
    } else {
      if (!MI->getOperand(1).isFPImm()) {
        report("G_FCONSTANT operand must be fpimm", MI);
        break;
      }
      const ConstantFP *CF = MI->getOperand(1).getFPImm();

      if (APFloat::getSizeInBits(CF->getValueAPF().getSemantics()) !=
          DstTy.getSizeInBits()) {
        report("inconsistent constant size", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE:
  case TargetOpcode::G_ZEXTLOAD:
  case TargetOpcode::G_SEXTLOAD: {
    LLT ValTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    if (!PtrTy.isPointer())
      report("Generic memory instruction must access a pointer", MI);

    // Generic loads and stores must have a single MachineMemOperand
    // describing that access.
    if (!MI->hasOneMemOperand()) {
      report("Generic instruction accessing memory must have one mem operand",
             MI);
    } else {
      const MachineMemOperand &MMO = **MI->memoperands_begin();
      if (MI->getOpcode() == TargetOpcode::G_ZEXTLOAD ||
          MI->getOpcode() == TargetOpcode::G_SEXTLOAD) {
        if (MMO.getSizeInBits() >= ValTy.getSizeInBits())
          report("Generic extload must have a narrower memory type", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_LOAD) {
        if (MMO.getSize() > ValTy.getSizeInBytes())
          report("load memory size cannot exceed result size", MI);
      } else if (MI->getOpcode() == TargetOpcode::G_STORE) {
        if (ValTy.getSizeInBytes() < MMO.getSize())
          report("store memory size cannot exceed value size", MI);
      }

      const AtomicOrdering Order = MMO.getSuccessOrdering();
      if (Opc == TargetOpcode::G_STORE) {
        if (Order == AtomicOrdering::Acquire ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic store cannot use acquire ordering", MI);
      } else {
        if (Order == AtomicOrdering::Release ||
            Order == AtomicOrdering::AcquireRelease)
          report("atomic load cannot use release ordering", MI);
      }
    }

    break;
  }
  case TargetOpcode::G_PHI: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstTy.isValid() || !all_of(drop_begin(MI->operands()),
                                    [this, &DstTy](const MachineOperand &MO) {
                                      if (!MO.isReg())
                                        return true;
                                      LLT Ty = MRI->getType(MO.getReg());
                                      if (!Ty.isValid() || (Ty != DstTy))
                                        return false;
                                      return true;
                                    }))
      report("Generic Instruction G_PHI has operands with incompatible/missing "
             "types",
             MI);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    if (SrcTy.isPointer() != DstTy.isPointer())
      report("bitcast cannot convert between pointers and other types", MI);

    if (SrcTy.getSizeInBits() != DstTy.getSizeInBits())
      report("bitcast sizes must match", MI);

    if (SrcTy == DstTy)
      report("bitcast must change the type", MI);

    break;
  }
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_ADDRSPACE_CAST: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    DstTy = DstTy.getScalarType();
    SrcTy = SrcTy.getScalarType();

    if (MI->getOpcode() == TargetOpcode::G_INTTOPTR) {
      if (!DstTy.isPointer())
        report("inttoptr result type must be a pointer", MI);
      if (SrcTy.isPointer())
        report("inttoptr source type must not be a pointer", MI);
    } else if (MI->getOpcode() == TargetOpcode::G_PTRTOINT) {
      if (!SrcTy.isPointer())
        report("ptrtoint source type must be a pointer", MI);
      if (DstTy.isPointer())
        report("ptrtoint result type must not be a pointer", MI);
    } else {
      assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST);
      if (!SrcTy.isPointer() || !DstTy.isPointer())
        report("addrspacecast types must be pointers", MI);
      else if (SrcTy.getAddressSpace() == DstTy.getAddressSpace())
        report("addrspacecast must convert different address spaces", MI);
    }

    break;
  }
  case TargetOpcode::G_PTR_ADD: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
    LLT OffsetTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !PtrTy.isValid() || !OffsetTy.isValid())
      break;

    if (!PtrTy.getScalarType().isPointer())
      report("gep first operand must be a pointer", MI);

    if (OffsetTy.getScalarType().isPointer())
      report("gep offset operand must not be a pointer", MI);

    // TODO: Is the offset allowed to be a scalar with a vector?
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    LLT MaskTy = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid() || !MaskTy.isValid())
      break;

    if (!DstTy.getScalarType().isPointer())
      report("ptrmask result type must be a pointer", MI);

    if (!MaskTy.getScalarType().isScalar())
      report("ptrmask mask type must be an integer", MI);

    verifyVectorElementMatch(DstTy, MaskTy, MI);
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPEXT:
  case TargetOpcode::G_FPTRUNC: {
    // Number of operands and presence of types is already checked (and
    // reported in case of any issues), so no need to report them again. As
    // we're trying to report as many issues as possible at once, however, the
    // instructions aren't guaranteed to have the right number of operands or
    // types attached to them at this point
    assert(MCID.getNumOperands() == 2 && "Expected 2 operands G_*{EXT,TRUNC}");
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    if (!DstTy.isValid() || !SrcTy.isValid())
      break;

    LLT DstElTy = DstTy.getScalarType();
    LLT SrcElTy = SrcTy.getScalarType();
    if (DstElTy.isPointer() || SrcElTy.isPointer())
      report("Generic extend/truncate can not operate on pointers", MI);

    verifyVectorElementMatch(DstTy, SrcTy, MI);

    unsigned DstSize = DstElTy.getSizeInBits();
    unsigned SrcSize = SrcElTy.getSizeInBits();
    switch (MI->getOpcode()) {
    default:
      if (DstSize <= SrcSize)
        report("Generic extend has destination type no larger than source",
               MI);
      break;
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_FPTRUNC:
      if (DstSize >= SrcSize)
        report("Generic truncate has destination type no smaller than source",
               MI);
      break;
    }
    break;
  }
: {
1358 LLT SelTy
= MRI
->getType(MI
->getOperand(0).getReg());
1359 LLT CondTy
= MRI
->getType(MI
->getOperand(1).getReg());
1360 if (!SelTy
.isValid() || !CondTy
.isValid())
1363 // Scalar condition select on a vector is valid.
1364 if (CondTy
.isVector())
1365 verifyVectorElementMatch(SelTy
, CondTy
, MI
);
1368 case TargetOpcode::G_MERGE_VALUES
: {
1369 // G_MERGE_VALUES should only be used to merge scalars into a larger scalar,
1370 // e.g. s2N = MERGE sN, sN
1371 // Merging multiple scalars into a vector is not allowed, should use
1372 // G_BUILD_VECTOR for that.
1373 LLT DstTy
= MRI
->getType(MI
->getOperand(0).getReg());
1374 LLT SrcTy
= MRI
->getType(MI
->getOperand(1).getReg());
1375 if (DstTy
.isVector() || SrcTy
.isVector())
1376 report("G_MERGE_VALUES cannot operate on vectors", MI
);
1378 const unsigned NumOps
= MI
->getNumOperands();
1379 if (DstTy
.getSizeInBits() != SrcTy
.getSizeInBits() * (NumOps
- 1))
1380 report("G_MERGE_VALUES result size is inconsistent", MI
);
1382 for (unsigned I
= 2; I
!= NumOps
; ++I
) {
1383 if (MRI
->getType(MI
->getOperand(I
).getReg()) != SrcTy
)
1384 report("G_MERGE_VALUES source types do not match", MI
);
1389 case TargetOpcode::G_UNMERGE_VALUES
: {
1390 unsigned NumDsts
= MI
->getNumOperands() - 1;
1391 LLT DstTy
= MRI
->getType(MI
->getOperand(0).getReg());
1392 for (unsigned i
= 1; i
< NumDsts
; ++i
) {
1393 if (MRI
->getType(MI
->getOperand(i
).getReg()) != DstTy
) {
1394 report("G_UNMERGE_VALUES destination types do not match", MI
);
1399 LLT SrcTy
= MRI
->getType(MI
->getOperand(NumDsts
).getReg());
1400 if (DstTy
.isVector()) {
1401 // This case is the converse of G_CONCAT_VECTORS.
1402 if (!SrcTy
.isVector() || SrcTy
.getScalarType() != DstTy
.getScalarType() ||
1403 SrcTy
.getNumElements() != NumDsts
* DstTy
.getNumElements())
1404 report("G_UNMERGE_VALUES source operand does not match vector "
1405 "destination operands",
1407 } else if (SrcTy
.isVector()) {
1408 // This case is the converse of G_BUILD_VECTOR, but relaxed to allow
1409 // mismatched types as long as the total size matches:
1410 // %0:_(s64), %1:_(s64) = G_UNMERGE_VALUES %2:_(<4 x s32>)
1411 if (SrcTy
.getSizeInBits() != NumDsts
* DstTy
.getSizeInBits())
1412 report("G_UNMERGE_VALUES vector source operand does not match scalar "
1413 "destination operands",
1416 // This case is the converse of G_MERGE_VALUES.
1417 if (SrcTy
.getSizeInBits() != NumDsts
* DstTy
.getSizeInBits()) {
1418 report("G_UNMERGE_VALUES scalar source operand does not match scalar "
1419 "destination operands",
1425 case TargetOpcode::G_BUILD_VECTOR
: {
1426 // Source types must be scalars, dest type a vector. Total size of scalars
1427 // must match the dest vector size.
1428 LLT DstTy
= MRI
->getType(MI
->getOperand(0).getReg());
1429 LLT SrcEltTy
= MRI
->getType(MI
->getOperand(1).getReg());
1430 if (!DstTy
.isVector() || SrcEltTy
.isVector()) {
1431 report("G_BUILD_VECTOR must produce a vector from scalar operands", MI
);
1435 if (DstTy
.getElementType() != SrcEltTy
)
1436 report("G_BUILD_VECTOR result element type must match source type", MI
);
1438 if (DstTy
.getNumElements() != MI
->getNumOperands() - 1)
1439 report("G_BUILD_VECTOR must have an operand for each elemement", MI
);
1441 for (const MachineOperand
&MO
: llvm::drop_begin(MI
->operands(), 2))
1442 if (MRI
->getType(MI
->getOperand(1).getReg()) != MRI
->getType(MO
.getReg()))
1443 report("G_BUILD_VECTOR source operand types are not homogeneous", MI
);
1447 case TargetOpcode::G_BUILD_VECTOR_TRUNC
: {
1448 // Source types must be scalars, dest type a vector. Scalar types must be
1449 // larger than the dest vector elt type, as this is a truncating operation.
1450 LLT DstTy
= MRI
->getType(MI
->getOperand(0).getReg());
1451 LLT SrcEltTy
= MRI
->getType(MI
->getOperand(1).getReg());
1452 if (!DstTy
.isVector() || SrcEltTy
.isVector())
1453 report("G_BUILD_VECTOR_TRUNC must produce a vector from scalar operands",
1455 for (const MachineOperand
&MO
: llvm::drop_begin(MI
->operands(), 2))
1456 if (MRI
->getType(MI
->getOperand(1).getReg()) != MRI
->getType(MO
.getReg()))
1457 report("G_BUILD_VECTOR_TRUNC source operand types are not homogeneous",
1459 if (SrcEltTy
.getSizeInBits() <= DstTy
.getElementType().getSizeInBits())
1460 report("G_BUILD_VECTOR_TRUNC source operand types are not larger than "
1465 case TargetOpcode::G_CONCAT_VECTORS
: {
1466 // Source types should be vectors, and total size should match the dest
1468 LLT DstTy
= MRI
->getType(MI
->getOperand(0).getReg());
1469 LLT SrcTy
= MRI
->getType(MI
->getOperand(1).getReg());
1470 if (!DstTy
.isVector() || !SrcTy
.isVector())
1471 report("G_CONCAT_VECTOR requires vector source and destination operands",
1474 if (MI
->getNumOperands() < 3)
1475 report("G_CONCAT_VECTOR requires at least 2 source operands", MI
);
1477 for (const MachineOperand
&MO
: llvm::drop_begin(MI
->operands(), 2))
1478 if (MRI
->getType(MI
->getOperand(1).getReg()) != MRI
->getType(MO
.getReg()))
1479 report("G_CONCAT_VECTOR source operand types are not homogeneous", MI
);
1480 if (DstTy
.getNumElements() !=
1481 SrcTy
.getNumElements() * (MI
->getNumOperands() - 1))
1482 report("G_CONCAT_VECTOR num dest and source elements should match", MI
);
1485 case TargetOpcode::G_ICMP
:
1486 case TargetOpcode::G_FCMP
: {
1487 LLT DstTy
= MRI
->getType(MI
->getOperand(0).getReg());
1488 LLT SrcTy
= MRI
->getType(MI
->getOperand(2).getReg());
1490 if ((DstTy
.isVector() != SrcTy
.isVector()) ||
1491 (DstTy
.isVector() && DstTy
.getNumElements() != SrcTy
.getNumElements()))
1492 report("Generic vector icmp/fcmp must preserve number of lanes", MI
);
1496 case TargetOpcode::G_EXTRACT
: {
1497 const MachineOperand
&SrcOp
= MI
->getOperand(1);
1498 if (!SrcOp
.isReg()) {
1499 report("extract source must be a register", MI
);
1503 const MachineOperand
&OffsetOp
= MI
->getOperand(2);
1504 if (!OffsetOp
.isImm()) {
1505 report("extract offset must be a constant", MI
);
1509 unsigned DstSize
= MRI
->getType(MI
->getOperand(0).getReg()).getSizeInBits();
1510 unsigned SrcSize
= MRI
->getType(SrcOp
.getReg()).getSizeInBits();
1511 if (SrcSize
== DstSize
)
1512 report("extract source must be larger than result", MI
);
1514 if (DstSize
+ OffsetOp
.getImm() > SrcSize
)
1515 report("extract reads past end of register", MI
);
1518 case TargetOpcode::G_INSERT
: {
1519 const MachineOperand
&SrcOp
= MI
->getOperand(2);
1520 if (!SrcOp
.isReg()) {
1521 report("insert source must be a register", MI
);
1525 const MachineOperand
&OffsetOp
= MI
->getOperand(3);
1526 if (!OffsetOp
.isImm()) {
1527 report("insert offset must be a constant", MI
);
1531 unsigned DstSize
= MRI
->getType(MI
->getOperand(0).getReg()).getSizeInBits();
1532 unsigned SrcSize
= MRI
->getType(SrcOp
.getReg()).getSizeInBits();
1534 if (DstSize
<= SrcSize
)
1535 report("inserted size must be smaller than total register", MI
);
1537 if (SrcSize
+ OffsetOp
.getImm() > DstSize
)
1538 report("insert writes past end of register", MI
);
1542 case TargetOpcode::G_JUMP_TABLE
: {
1543 if (!MI
->getOperand(1).isJTI())
1544 report("G_JUMP_TABLE source operand must be a jump table index", MI
);
1545 LLT DstTy
= MRI
->getType(MI
->getOperand(0).getReg());
1546 if (!DstTy
.isPointer())
1547 report("G_JUMP_TABLE dest operand must have a pointer type", MI
);
1550 case TargetOpcode::G_BRJT
: {
1551 if (!MRI
->getType(MI
->getOperand(0).getReg()).isPointer())
1552 report("G_BRJT src operand 0 must be a pointer type", MI
);
1554 if (!MI
->getOperand(1).isJTI())
1555 report("G_BRJT src operand 1 must be a jump table index", MI
);
1557 const auto &IdxOp
= MI
->getOperand(2);
1558 if (!IdxOp
.isReg() || MRI
->getType(IdxOp
.getReg()).isPointer())
1559 report("G_BRJT src operand 2 must be a scalar reg type", MI
);
1562 case TargetOpcode::G_INTRINSIC
:
1563 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
:
1564 case TargetOpcode::G_INTRINSIC_CONVERGENT
:
1565 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS
: {
1566 // TODO: Should verify number of def and use operands, but the current
1567 // interface requires passing in IR types for mangling.
1568 const MachineOperand
&IntrIDOp
= MI
->getOperand(MI
->getNumExplicitDefs());
1569 if (!IntrIDOp
.isIntrinsicID()) {
1570 report("G_INTRINSIC first src operand must be an intrinsic ID", MI
);
1574 if (!verifyGIntrinsicSideEffects(MI
))
1576 if (!verifyGIntrinsicConvergence(MI
))
1581 case TargetOpcode::G_SEXT_INREG
: {
1582 if (!MI
->getOperand(2).isImm()) {
1583 report("G_SEXT_INREG expects an immediate operand #2", MI
);
1587 LLT SrcTy
= MRI
->getType(MI
->getOperand(1).getReg());
1588 int64_t Imm
= MI
->getOperand(2).getImm();
1590 report("G_SEXT_INREG size must be >= 1", MI
);
1591 if (Imm
>= SrcTy
.getScalarSizeInBits())
1592 report("G_SEXT_INREG size must be less than source bit width", MI
);
1595 case TargetOpcode::G_BSWAP
: {
1596 LLT DstTy
= MRI
->getType(MI
->getOperand(0).getReg());
1597 if (DstTy
.getScalarSizeInBits() % 16 != 0)
1598 report("G_BSWAP size must be a multiple of 16 bits", MI
);
1601 case TargetOpcode::G_SHUFFLE_VECTOR
: {
1602 const MachineOperand
&MaskOp
= MI
->getOperand(3);
1603 if (!MaskOp
.isShuffleMask()) {
1604 report("Incorrect mask operand type for G_SHUFFLE_VECTOR", MI
);
1608 LLT DstTy
= MRI
->getType(MI
->getOperand(0).getReg());
1609 LLT Src0Ty
= MRI
->getType(MI
->getOperand(1).getReg());
1610 LLT Src1Ty
= MRI
->getType(MI
->getOperand(2).getReg());
1612 if (Src0Ty
!= Src1Ty
)
1613 report("Source operands must be the same type", MI
);
1615 if (Src0Ty
.getScalarType() != DstTy
.getScalarType())
1616 report("G_SHUFFLE_VECTOR cannot change element type", MI
);
1618 // Don't check that all operands are vector because scalars are used in
1619 // place of 1 element vectors.
1620 int SrcNumElts
= Src0Ty
.isVector() ? Src0Ty
.getNumElements() : 1;
1621 int DstNumElts
= DstTy
.isVector() ? DstTy
.getNumElements() : 1;
1623 ArrayRef
<int> MaskIdxes
= MaskOp
.getShuffleMask();
1625 if (static_cast<int>(MaskIdxes
.size()) != DstNumElts
)
1626 report("Wrong result type for shufflemask", MI
);
1628 for (int Idx
: MaskIdxes
) {
1632 if (Idx
>= 2 * SrcNumElts
)
1633 report("Out of bounds shuffle index", MI
);
1638 case TargetOpcode::G_DYN_STACKALLOC
: {
1639 const MachineOperand
&DstOp
= MI
->getOperand(0);
1640 const MachineOperand
&AllocOp
= MI
->getOperand(1);
1641 const MachineOperand
&AlignOp
= MI
->getOperand(2);
1643 if (!DstOp
.isReg() || !MRI
->getType(DstOp
.getReg()).isPointer()) {
1644 report("dst operand 0 must be a pointer type", MI
);
1648 if (!AllocOp
.isReg() || !MRI
->getType(AllocOp
.getReg()).isScalar()) {
1649 report("src operand 1 must be a scalar reg type", MI
);
1653 if (!AlignOp
.isImm()) {
1654 report("src operand 2 must be an immediate type", MI
);
  case TargetOpcode::G_MEMCPY_INLINE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMMOVE: {
    ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
    if (MMOs.size() != 2) {
      report("memcpy/memmove must have 2 memory operands", MI);
      break;
    }

    if ((!MMOs[0]->isStore() || MMOs[0]->isLoad()) ||
        (MMOs[1]->isStore() || !MMOs[1]->isLoad())) {
      report("wrong memory operand types", MI);
      break;
    }

    if (MMOs[0]->getSize() != MMOs[1]->getSize())
      report("inconsistent memory operand sizes", MI);

    LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
    LLT SrcPtrTy = MRI->getType(MI->getOperand(1).getReg());

    if (!DstPtrTy.isPointer() || !SrcPtrTy.isPointer()) {
      report("memory instruction operand must be a pointer", MI);
      break;
    }

    if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
      report("inconsistent store address space", MI);
    if (SrcPtrTy.getAddressSpace() != MMOs[1]->getAddrSpace())
      report("inconsistent load address space", MI);

    if (Opc != TargetOpcode::G_MEMCPY_INLINE)
      if (!MI->getOperand(3).isImm() || (MI->getOperand(3).getImm() & ~1LL))
        report("'tail' flag (operand 3) must be an immediate 0 or 1", MI);

    break;
  }
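  // Note on the checks above: memoperand 0 is expected to describe the store
  // to the destination pointer (operand 0) and memoperand 1 the load from the
  // source pointer (operand 1), which is why the address-space comparisons
  // pair DstPtrTy with MMOs[0] and SrcPtrTy with MMOs[1].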
  case TargetOpcode::G_BZERO:
  case TargetOpcode::G_MEMSET: {
    ArrayRef<MachineMemOperand *> MMOs = MI->memoperands();
    std::string Name = Opc == TargetOpcode::G_MEMSET ? "memset" : "bzero";
    if (MMOs.size() != 1) {
      report(Twine(Name, " must have 1 memory operand"), MI);
      break;
    }

    if ((!MMOs[0]->isStore() || MMOs[0]->isLoad())) {
      report(Twine(Name, " memory operand must be a store"), MI);
      break;
    }

    LLT DstPtrTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstPtrTy.isPointer()) {
      report(Twine(Name, " operand must be a pointer"), MI);
      break;
    }

    if (DstPtrTy.getAddressSpace() != MMOs[0]->getAddrSpace())
      report("inconsistent " + Twine(Name, " address space"), MI);

    if (!MI->getOperand(MI->getNumOperands() - 1).isImm() ||
        (MI->getOperand(MI->getNumOperands() - 1).getImm() & ~1LL))
      report("'tail' flag (last operand) must be an immediate 0 or 1", MI);

    break;
  }
  case TargetOpcode::G_VECREDUCE_SEQ_FADD:
  case TargetOpcode::G_VECREDUCE_SEQ_FMUL: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
    LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
    if (!DstTy.isScalar())
      report("Vector reduction requires a scalar destination type", MI);
    if (!Src1Ty.isScalar())
      report("Sequential FADD/FMUL vector reduction requires a scalar 1st "
             "operand", MI);
    if (!Src2Ty.isVector())
      report("Sequential FADD/FMUL vector reduction must have a vector 2nd "
             "operand", MI);
    break;
  }
  case TargetOpcode::G_VECREDUCE_FADD:
  case TargetOpcode::G_VECREDUCE_FMUL:
  case TargetOpcode::G_VECREDUCE_FMAX:
  case TargetOpcode::G_VECREDUCE_FMIN:
  case TargetOpcode::G_VECREDUCE_FMAXIMUM:
  case TargetOpcode::G_VECREDUCE_FMINIMUM:
  case TargetOpcode::G_VECREDUCE_ADD:
  case TargetOpcode::G_VECREDUCE_MUL:
  case TargetOpcode::G_VECREDUCE_AND:
  case TargetOpcode::G_VECREDUCE_OR:
  case TargetOpcode::G_VECREDUCE_XOR:
  case TargetOpcode::G_VECREDUCE_SMAX:
  case TargetOpcode::G_VECREDUCE_SMIN:
  case TargetOpcode::G_VECREDUCE_UMAX:
  case TargetOpcode::G_VECREDUCE_UMIN: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (!DstTy.isScalar())
      report("Vector reduction requires a scalar destination type", MI);
    break;
  }
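  // Illustration (hypothetical MIR, register names invented): a well-formed
  // reduction produces a scalar from a vector source:
  //   %sum:_(s32) = G_VECREDUCE_ADD %vec:_(<4 x s32>)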
  case TargetOpcode::G_SBFX:
  case TargetOpcode::G_UBFX: {
    LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
    if (DstTy.isVector()) {
      report("Bitfield extraction is not supported on vectors", MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_ROTR:
  case TargetOpcode::G_ROTL: {
    LLT Src1Ty = MRI->getType(MI->getOperand(1).getReg());
    LLT Src2Ty = MRI->getType(MI->getOperand(2).getReg());
    if (Src1Ty.isVector() != Src2Ty.isVector()) {
      report("Shifts and rotates require operands to be either all scalars or "
             "all vectors",
             MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_LLROUND:
  case TargetOpcode::G_LROUND: {
    verifyAllRegOpsScalar(*MI, *MRI);
    break;
  }
  case TargetOpcode::G_IS_FPCLASS: {
    LLT DestTy = MRI->getType(MI->getOperand(0).getReg());
    LLT DestEltTy = DestTy.getScalarType();
    if (!DestEltTy.isScalar()) {
      report("Destination must be a scalar or vector of scalars", MI);
      break;
    }
    LLT SrcTy = MRI->getType(MI->getOperand(1).getReg());
    LLT SrcEltTy = SrcTy.getScalarType();
    if (!SrcEltTy.isScalar()) {
      report("Source must be a scalar or vector of scalars", MI);
      break;
    }
    if (!verifyVectorElementMatch(DestTy, SrcTy, MI))
      break;
    const MachineOperand &TestMO = MI->getOperand(2);
    if (!TestMO.isImm()) {
      report("floating-point class set (operand 2) must be an immediate", MI);
      break;
    }
    int64_t Test = TestMO.getImm();
    if (Test < 0 || Test > fcAllFlags) {
      report("Incorrect floating-point class set (operand 2)", MI);
      break;
    }
    break;
  }
  case TargetOpcode::G_ASSERT_ALIGN: {
    if (MI->getOperand(2).getImm() < 1)
      report("alignment immediate must be >= 1", MI);
    break;
  }
  case TargetOpcode::G_CONSTANT_POOL: {
    if (!MI->getOperand(1).isCPI())
      report("Src operand 1 must be a constant pool index", MI);
    if (!MRI->getType(MI->getOperand(0).getReg()).isPointer())
      report("Dst operand 0 must be a pointer", MI);
    break;
  }
  default:
    break;
  }
}
void MachineVerifier::visitMachineInstrBefore(const MachineInstr *MI) {
  const MCInstrDesc &MCID = MI->getDesc();
  if (MI->getNumOperands() < MCID.getNumOperands()) {
    report("Too few operands", MI);
    errs() << MCID.getNumOperands() << " operands expected, but "
           << MI->getNumOperands() << " given.\n";
  }

  if (MI->getFlag(MachineInstr::NoConvergent) && !MCID.isConvergent())
    report("NoConvergent flag expected only on convergent instructions.", MI);

  if (MI->isPHI()) {
    if (MF->getProperties().hasProperty(
            MachineFunctionProperties::Property::NoPHIs))
      report("Found PHI instruction with NoPHIs property set", MI);

    if (FirstNonPHI)
      report("Found PHI instruction after non-PHI", MI);
  } else if (FirstNonPHI == nullptr)
    FirstNonPHI = MI;
  // Check the tied operands.
  if (MI->isInlineAsm())
    verifyInlineAsm(MI);

  // Check that unspillable terminators define a reg and have at most one use.
  if (TII->isUnspillableTerminator(MI)) {
    if (!MI->getOperand(0).isReg() || !MI->getOperand(0).isDef())
      report("Unspillable Terminator does not define a reg", MI);
    Register Def = MI->getOperand(0).getReg();
    if (Def.isVirtual() &&
        !MF->getProperties().hasProperty(
            MachineFunctionProperties::Property::NoPHIs) &&
        std::distance(MRI->use_nodbg_begin(Def), MRI->use_nodbg_end()) > 1)
      report("Unspillable Terminator expected to have at most one use!", MI);
  }
  // A fully-formed DBG_VALUE must have a location. Ignore partially formed
  // DBG_VALUEs: these are convenient to use in tests, but should never get
  // generated.
  if (MI->isDebugValue() && MI->getNumOperands() == 4)
    if (!MI->getDebugLoc())
      report("Missing DebugLoc for debug instruction", MI);

  // Meta instructions should never be the subject of debug value tracking;
  // they don't create a value in the output program at all.
  if (MI->isMetaInstruction() && MI->peekDebugInstrNum())
    report("Metadata instruction should not have a value tracking number", MI);
  // Check the MachineMemOperands for basic consistency.
  for (MachineMemOperand *Op : MI->memoperands()) {
    if (Op->isLoad() && !MI->mayLoad())
      report("Missing mayLoad flag", MI);
    if (Op->isStore() && !MI->mayStore())
      report("Missing mayStore flag", MI);
  }
  // Debug values must not have a slot index.
  // Other instructions must have one, unless they are inside a bundle.
  if (LiveInts) {
    bool mapped = !LiveInts->isNotInMIMap(*MI);
    if (MI->isDebugOrPseudoInstr()) {
      if (mapped)
        report("Debug instruction has a slot index", MI);
    } else if (MI->isInsideBundle()) {
      if (mapped)
        report("Instruction inside bundle has a slot index", MI);
    } else {
      if (!mapped)
        report("Missing slot index", MI);
    }
  }
  unsigned Opc = MCID.getOpcode();
  if (isPreISelGenericOpcode(Opc) || isPreISelGenericOptimizationHint(Opc)) {
    verifyPreISelGenericInstruction(MI);
    return;
  }

  StringRef ErrorInfo;
  if (!TII->verifyInstruction(*MI, ErrorInfo))
    report(ErrorInfo.data(), MI);
  // Verify properties of various specific instruction types
  switch (MI->getOpcode()) {
  case TargetOpcode::COPY: {
    const MachineOperand &DstOp = MI->getOperand(0);
    const MachineOperand &SrcOp = MI->getOperand(1);
    const Register SrcReg = SrcOp.getReg();
    const Register DstReg = DstOp.getReg();

    LLT DstTy = MRI->getType(DstReg);
    LLT SrcTy = MRI->getType(SrcReg);
    if (SrcTy.isValid() && DstTy.isValid()) {
      // If both types are valid, check that the types are the same.
      if (SrcTy != DstTy) {
        report("Copy Instruction is illegal with mismatching types", MI);
        errs() << "Def = " << DstTy << ", Src = " << SrcTy << "\n";
      }
      break;
    }

    if (!SrcTy.isValid() && !DstTy.isValid())
      break;

    // If we have only one valid type, this is likely a copy between a virtual
    // and physical register.
    unsigned SrcSize = 0;
    unsigned DstSize = 0;
    if (SrcReg.isPhysical() && DstTy.isValid()) {
      const TargetRegisterClass *SrcRC =
          TRI->getMinimalPhysRegClassLLT(SrcReg, DstTy);
      if (SrcRC)
        SrcSize = TRI->getRegSizeInBits(*SrcRC);
    }

    if (SrcSize == 0)
      SrcSize = TRI->getRegSizeInBits(SrcReg, *MRI);

    if (DstReg.isPhysical() && SrcTy.isValid()) {
      const TargetRegisterClass *DstRC =
          TRI->getMinimalPhysRegClassLLT(DstReg, SrcTy);
      if (DstRC)
        DstSize = TRI->getRegSizeInBits(*DstRC);
    }

    if (DstSize == 0)
      DstSize = TRI->getRegSizeInBits(DstReg, *MRI);

    if (SrcSize != 0 && DstSize != 0 && SrcSize != DstSize) {
      if (!DstOp.getSubReg() && !SrcOp.getSubReg()) {
        report("Copy Instruction is illegal with mismatching sizes", MI);
        errs() << "Def Size = " << DstSize << ", Src Size = " << SrcSize
               << "\n";
      }
    }
    break;
  }
  case TargetOpcode::STATEPOINT: {
    StatepointOpers SO(MI);
    if (!MI->getOperand(SO.getIDPos()).isImm() ||
        !MI->getOperand(SO.getNBytesPos()).isImm() ||
        !MI->getOperand(SO.getNCallArgsPos()).isImm()) {
      report("meta operands to STATEPOINT not constant!", MI);
      break;
    }

    auto VerifyStackMapConstant = [&](unsigned Offset) {
      if (Offset >= MI->getNumOperands()) {
        report("stack map constant to STATEPOINT is out of range!", MI);
        return;
      }
      if (!MI->getOperand(Offset - 1).isImm() ||
          MI->getOperand(Offset - 1).getImm() != StackMaps::ConstantOp ||
          !MI->getOperand(Offset).isImm())
        report("stack map constant to STATEPOINT not well formed!", MI);
    };
    VerifyStackMapConstant(SO.getCCIdx());
    VerifyStackMapConstant(SO.getFlagsIdx());
    VerifyStackMapConstant(SO.getNumDeoptArgsIdx());
    VerifyStackMapConstant(SO.getNumGCPtrIdx());
    VerifyStackMapConstant(SO.getNumAllocaIdx());
    VerifyStackMapConstant(SO.getNumGcMapEntriesIdx());

    // Verify that all explicit statepoint defs are tied to gc operands as
    // they are expected to be a relocation of gc operands.
    unsigned FirstGCPtrIdx = SO.getFirstGCPtrIdx();
    unsigned LastGCPtrIdx = SO.getNumAllocaIdx() - 2;
    for (unsigned Idx = 0; Idx < MI->getNumDefs(); Idx++) {
      unsigned UseOpIdx;
      if (!MI->isRegTiedToUseOperand(Idx, &UseOpIdx)) {
        report("STATEPOINT defs expected to be tied", MI);
        break;
      }
      if (UseOpIdx < FirstGCPtrIdx || UseOpIdx > LastGCPtrIdx) {
        report("STATEPOINT def tied to non-gc operand", MI);
        break;
      }
    }

    // TODO: verify we have properly encoded deopt arguments
  } break;
  case TargetOpcode::INSERT_SUBREG: {
    unsigned InsertedSize;
    if (unsigned SubIdx = MI->getOperand(2).getSubReg())
      InsertedSize = TRI->getSubRegIdxSize(SubIdx);
    else
      InsertedSize = TRI->getRegSizeInBits(MI->getOperand(2).getReg(), *MRI);
    unsigned SubRegSize = TRI->getSubRegIdxSize(MI->getOperand(3).getImm());
    if (SubRegSize < InsertedSize) {
      report("INSERT_SUBREG expected inserted value to have equal or lesser "
             "size than the subreg it was inserted into", MI);
    }
    break;
  }
  case TargetOpcode::REG_SEQUENCE: {
    unsigned NumOps = MI->getNumOperands();
    if (!(NumOps & 1)) {
      report("Invalid number of operands for REG_SEQUENCE", MI);
      break;
    }

    for (unsigned I = 1; I != NumOps; I += 2) {
      const MachineOperand &RegOp = MI->getOperand(I);
      const MachineOperand &SubRegOp = MI->getOperand(I + 1);

      if (!RegOp.isReg())
        report("Invalid register operand for REG_SEQUENCE", &RegOp, I);

      if (!SubRegOp.isImm() || SubRegOp.getImm() == 0 ||
          SubRegOp.getImm() >= TRI->getNumSubRegIndices()) {
        report("Invalid subregister index operand for REG_SEQUENCE",
               &SubRegOp, I + 1);
      }
    }

    Register DstReg = MI->getOperand(0).getReg();
    if (DstReg.isPhysical())
      report("REG_SEQUENCE does not support physical register results", MI);

    if (MI->getOperand(0).getSubReg())
      report("Invalid subreg result for REG_SEQUENCE", MI);

    break;
  }
  }
}
void MachineVerifier::visitMachineOperand(const MachineOperand *MO,
                                          unsigned MONum) {
  const MachineInstr *MI = MO->getParent();
  const MCInstrDesc &MCID = MI->getDesc();
  unsigned NumDefs = MCID.getNumDefs();
  if (MCID.getOpcode() == TargetOpcode::PATCHPOINT)
    NumDefs = (MONum == 0 && MO->isReg()) ? NumDefs : 0;

  // The first MCID.NumDefs operands must be explicit register defines
  if (MONum < NumDefs) {
    const MCOperandInfo &MCOI = MCID.operands()[MONum];
    if (!MO->isReg())
      report("Explicit definition must be a register", MO, MONum);
    else if (!MO->isDef() && !MCOI.isOptionalDef())
      report("Explicit definition marked as use", MO, MONum);
    else if (MO->isImplicit())
      report("Explicit definition marked as implicit", MO, MONum);
  } else if (MONum < MCID.getNumOperands()) {
    const MCOperandInfo &MCOI = MCID.operands()[MONum];
    // Don't check if it's the last operand in a variadic instruction. See,
    // e.g., LDM_RET in the arm back end. Check non-variadic operands only.
    bool IsOptional = MI->isVariadic() && MONum == MCID.getNumOperands() - 1;
    if (!IsOptional) {
      if (MO->isReg()) {
        if (MO->isDef() && !MCOI.isOptionalDef() && !MCID.variadicOpsAreDefs())
          report("Explicit operand marked as def", MO, MONum);
        if (MO->isImplicit())
          report("Explicit operand marked as implicit", MO, MONum);
      }

      // Check that an instruction has register operands only as expected.
      if (MCOI.OperandType == MCOI::OPERAND_REGISTER &&
          !MO->isReg() && !MO->isFI())
        report("Expected a register operand.", MO, MONum);

      if (MCOI.OperandType == MCOI::OPERAND_IMMEDIATE ||
          (MCOI.OperandType == MCOI::OPERAND_PCREL &&
           !TII->isPCRelRegisterOperandLegal(*MO)))
        report("Expected a non-register operand.", MO, MONum);
    }

    // Check that the operand satisfies its TIED_TO constraint, if any.
    int TiedTo = MCID.getOperandConstraint(MONum, MCOI::TIED_TO);
    if (TiedTo != -1) {
      if (!MO->isReg())
        report("Tied use must be a register", MO, MONum);
      else if (!MO->isTied())
        report("Operand should be tied", MO, MONum);
      else if (unsigned(TiedTo) != MI->findTiedOperandIdx(MONum))
        report("Tied def doesn't match MCInstrDesc", MO, MONum);
      else if (MO->getReg().isPhysical()) {
        const MachineOperand &MOTied = MI->getOperand(TiedTo);
        if (!MOTied.isReg())
          report("Tied counterpart must be a register", &MOTied, TiedTo);
        else if (MOTied.getReg().isPhysical() &&
                 MO->getReg() != MOTied.getReg())
          report("Tied physical registers must match.", &MOTied, TiedTo);
      }
    } else if (MO->isReg() && MO->isTied())
      report("Explicit operand should not be tied", MO, MONum);
  } else {
    // ARM adds %reg0 operands to indicate predicates. We'll allow that.
    if (MO->isReg() && !MO->isImplicit() && !MI->isVariadic() && MO->getReg())
      report("Extra explicit operand on non-variadic instruction", MO, MONum);
  }
  switch (MO->getType()) {
  case MachineOperand::MO_Register: {
    // Verify debug flag on debug instructions. Check this first because reg0
    // indicates an undefined debug value.
    if (MI->isDebugInstr() && MO->isUse()) {
      if (!MO->isDebug())
        report("Register operand must be marked debug", MO, MONum);
    } else if (MO->isDebug()) {
      report("Register operand must not be marked debug", MO, MONum);
    }

    const Register Reg = MO->getReg();
    if (!Reg)
      return;
    if (MRI->tracksLiveness() && !MI->isDebugInstr())
      checkLiveness(MO, MONum);

    if (MO->isDef() && MO->isUndef() && !MO->getSubReg() &&
        MO->getReg().isVirtual()) // TODO: Apply to physregs too
      report("Undef virtual register def operands require a subregister", MO,
             MONum);
    // Verify the consistency of tied operands.
    if (MO->isTied()) {
      unsigned OtherIdx = MI->findTiedOperandIdx(MONum);
      const MachineOperand &OtherMO = MI->getOperand(OtherIdx);
      if (!OtherMO.isReg())
        report("Must be tied to a register", MO, MONum);
      if (!OtherMO.isTied())
        report("Missing tie flags on tied operand", MO, MONum);
      if (MI->findTiedOperandIdx(OtherIdx) != MONum)
        report("Inconsistent tie links", MO, MONum);
      if (MONum < MCID.getNumDefs()) {
        if (OtherIdx < MCID.getNumOperands()) {
          if (-1 == MCID.getOperandConstraint(OtherIdx, MCOI::TIED_TO))
            report("Explicit def tied to explicit use without tie constraint",
                   MO, MONum);
        } else {
          if (!OtherMO.isImplicit())
            report("Explicit def should be tied to implicit use", MO, MONum);
        }
      }
    }
    // Verify two-address constraints after the TwoAddressInstruction pass.
    // Both the TwoAddressInstruction and PHI-node-elimination passes call
    // MRI->leaveSSA() to clear IsSSA, but this verification should run after
    // TwoAddressInstruction and not after PHI elimination. So instead of
    // keying on IsSSA, we key on the TiedOpsRewritten property, which is set
    // by the TwoAddressInstruction pass itself.
    unsigned DefIdx;
    if (MF->getProperties().hasProperty(
            MachineFunctionProperties::Property::TiedOpsRewritten) &&
        MO->isUse() && MI->isRegTiedToDefOperand(MONum, &DefIdx) &&
        Reg != MI->getOperand(DefIdx).getReg())
      report("Two-address instruction operands must be identical", MO, MONum);
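    // Illustration (hypothetical MIR, register names invented): once tied
    // operands are rewritten, a tied use must name the same register as its
    // def, as in "%a = ADD %a(tied), %b"; "%a = ADD %c(tied), %b" would be
    // reported above.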
    // Check register classes.
    unsigned SubIdx = MO->getSubReg();

    if (Reg.isPhysical()) {
      if (SubIdx) {
        report("Illegal subregister index for physical register", MO, MONum);
        return;
      }
      if (MONum < MCID.getNumOperands()) {
        if (const TargetRegisterClass *DRC =
                TII->getRegClass(MCID, MONum, TRI, *MF)) {
          if (!DRC->contains(Reg)) {
            report("Illegal physical register for instruction", MO, MONum);
            errs() << printReg(Reg, TRI) << " is not a "
                   << TRI->getRegClassName(DRC) << " register.\n";
          }
        }
      }
      if (MO->isRenamable()) {
        if (MRI->isReserved(Reg)) {
          report("isRenamable set on reserved register", MO, MONum);
          return;
        }
      }
    } else {
      // Virtual register.
      const TargetRegisterClass *RC = MRI->getRegClassOrNull(Reg);
      if (!RC) {
        // This is a generic virtual register.

        // Do not allow undef uses for generic virtual registers. This ensures
        // getVRegDef can never fail and return null on a generic register.
        //
        // FIXME: This restriction should probably be broadened to all SSA
        // MIR. However, DetectDeadLanes/ProcessImplicitDefs technically still
        // run on the SSA function just before phi elimination.
        if (MO->isUndef())
          report("Generic virtual register use cannot be undef", MO, MONum);

        // Debug value instruction is permitted to use undefined vregs.
        // This is a performance measure to skip the overhead of immediately
        // pruning unused debug operands. The final undef substitution occurs
        // when debug values are allocated in LDVImpl::handleDebugValue, so
        // these verifications always apply after this pass.
        if (isFunctionTracksDebugUserValues || !MO->isUse() ||
            !MI->isDebugValue() || !MRI->def_empty(Reg)) {
          // If we're post-Select, we can't have gvregs anymore.
          if (isFunctionSelected) {
            report("Generic virtual register invalid in a Selected function",
                   MO, MONum);
            return;
          }

          // The gvreg must have a type and it must not have a SubIdx.
          LLT Ty = MRI->getType(Reg);
          if (!Ty.isValid()) {
            report("Generic virtual register must have a valid type", MO,
                   MONum);
            return;
          }

          const RegisterBank *RegBank = MRI->getRegBankOrNull(Reg);
          const RegisterBankInfo *RBI = MF->getSubtarget().getRegBankInfo();

          // If we're post-RegBankSelect, the gvreg must have a bank.
          if (!RegBank && isFunctionRegBankSelected) {
            report("Generic virtual register must have a bank in a "
                   "RegBankSelected function",
                   MO, MONum);
            return;
          }

          // Make sure the register fits into its register bank if any.
          if (RegBank && Ty.isValid() &&
              RBI->getMaximumSize(RegBank->getID()) < Ty.getSizeInBits()) {
            report("Register bank is too small for virtual register", MO,
                   MONum);
            errs() << "Register bank " << RegBank->getName() << " too small("
                   << RBI->getMaximumSize(RegBank->getID()) << ") to fit "
                   << Ty.getSizeInBits() << "-bits\n";
            return;
          }
        }

        if (SubIdx) {
          report("Generic virtual register does not allow subregister index",
                 MO, MONum);
          return;
        }

        // If this is a target specific instruction and this operand
        // has register class constraint, the virtual register must
        // comply to it.
        if (!isPreISelGenericOpcode(MCID.getOpcode()) &&
            MONum < MCID.getNumOperands() &&
            TII->getRegClass(MCID, MONum, TRI, *MF)) {
          report("Virtual register does not match instruction constraint", MO,
                 MONum);
          errs() << "Expect register class "
                 << TRI->getRegClassName(
                        TII->getRegClass(MCID, MONum, TRI, *MF))
                 << " but got nothing\n";
          return;
        }

        break;
      }
      if (SubIdx) {
        const TargetRegisterClass *SRC =
            TRI->getSubClassWithSubReg(RC, SubIdx);
        if (!SRC) {
          report("Invalid subregister index for virtual register", MO, MONum);
          errs() << "Register class " << TRI->getRegClassName(RC)
                 << " does not support subreg index " << SubIdx << "\n";
          return;
        }
        if (RC != SRC) {
          report("Invalid register class for subregister index", MO, MONum);
          errs() << "Register class " << TRI->getRegClassName(RC)
                 << " does not fully support subreg index " << SubIdx << "\n";
          return;
        }
      }
      if (MONum < MCID.getNumOperands()) {
        if (const TargetRegisterClass *DRC =
                TII->getRegClass(MCID, MONum, TRI, *MF)) {
          if (SubIdx) {
            const TargetRegisterClass *SuperRC =
                TRI->getLargestLegalSuperClass(RC, *MF);
            if (!SuperRC) {
              report("No largest legal super class exists.", MO, MONum);
              return;
            }
            DRC = TRI->getMatchingSuperRegClass(SuperRC, DRC, SubIdx);
            if (!DRC) {
              report("No matching super-reg register class.", MO, MONum);
              return;
            }
          }
          if (!RC->hasSuperClassEq(DRC)) {
            report("Illegal virtual register for instruction", MO, MONum);
            errs() << "Expected a " << TRI->getRegClassName(DRC)
                   << " register, but got a " << TRI->getRegClassName(RC)
                   << " register\n";
          }
        }
      }
    }
    break;
  }
  case MachineOperand::MO_RegisterMask:
    regMasks.push_back(MO->getRegMask());
    break;

  case MachineOperand::MO_MachineBasicBlock:
    if (MI->isPHI() && !MO->getMBB()->isSuccessor(MI->getParent()))
      report("PHI operand is not in the CFG", MO, MONum);
    break;
  case MachineOperand::MO_FrameIndex:
    if (LiveStks && LiveStks->hasInterval(MO->getIndex()) &&
        LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      int FI = MO->getIndex();
      LiveInterval &LI = LiveStks->getInterval(FI);
      SlotIndex Idx = LiveInts->getInstructionIndex(*MI);

      bool stores = MI->mayStore();
      bool loads = MI->mayLoad();
      // For a memory-to-memory move, we need to check if the frame
      // index is used for storing or loading, by inspecting the
      // memory operands.
      if (stores && loads) {
        for (auto *MMO : MI->memoperands()) {
          const PseudoSourceValue *PSV = MMO->getPseudoValue();
          if (PSV == nullptr) continue;
          const FixedStackPseudoSourceValue *Value =
            dyn_cast<FixedStackPseudoSourceValue>(PSV);
          if (Value == nullptr) continue;
          if (Value->getFrameIndex() != FI) continue;

          if (MMO->isStore())
            loads = false;
          else
            stores = false;
          break;
        }
        if (loads == stores)
          report("Missing fixed stack memoperand.", MI);
      }
      if (loads && !LI.liveAt(Idx.getRegSlot(true))) {
        report("Instruction loads from dead spill slot", MO, MONum);
        errs() << "Live stack: " << LI << '\n';
      }
      if (stores && !LI.liveAt(Idx.getRegSlot())) {
        report("Instruction stores to dead spill slot", MO, MONum);
        errs() << "Live stack: " << LI << '\n';
      }
    }
    break;
  case MachineOperand::MO_CFIIndex:
    if (MO->getCFIIndex() >= MF->getFrameInstructions().size())
      report("CFI instruction has invalid index", MO, MONum);
    break;

  default:
    break;
  }
}
void MachineVerifier::checkLivenessAtUse(const MachineOperand *MO,
                                         unsigned MONum, SlotIndex UseIdx,
                                         const LiveRange &LR,
                                         Register VRegOrUnit,
                                         LaneBitmask LaneMask) {
  const MachineInstr *MI = MO->getParent();
  LiveQueryResult LRQ = LR.Query(UseIdx);
  bool HasValue = LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut());
  // Check if we have a segment at the use; note however that we only need one
  // live subregister range, the others may be dead.
  if (!HasValue && LaneMask.none()) {
    report("No live segment at use", MO, MONum);
    report_context_liverange(LR);
    report_context_vreg_regunit(VRegOrUnit);
    report_context(UseIdx);
  }
  if (MO->isKill() && !LRQ.isKill()) {
    report("Live range continues after kill flag", MO, MONum);
    report_context_liverange(LR);
    report_context_vreg_regunit(VRegOrUnit);
    if (LaneMask.any())
      report_context_lanemask(LaneMask);
    report_context(UseIdx);
  }
}
void MachineVerifier::checkLivenessAtDef(const MachineOperand *MO,
                                         unsigned MONum, SlotIndex DefIdx,
                                         const LiveRange &LR,
                                         Register VRegOrUnit,
                                         bool SubRangeCheck,
                                         LaneBitmask LaneMask) {
  if (const VNInfo *VNI = LR.getVNInfoAt(DefIdx)) {
    // The LR can correspond to the whole reg and its def slot is not obliged
    // to be the same as the MO's def slot. E.g. when we check here a "normal"
    // subreg MO but there is another EC subreg MO in the same instruction, the
    // whole reg has an EC def slot that differs from the currently checked
    // MO's def slot. For example:
    // %0 [16e,32r:0) 0@16e L..3 [16e,32r:0) 0@16e L..C [16r,32r:0) 0@16r
    // The check that there is an early-clobber def of the same superregister
    // somewhere is performed in visitMachineFunctionAfter().
    if (((SubRangeCheck || MO->getSubReg() == 0) && VNI->def != DefIdx) ||
        !SlotIndex::isSameInstr(VNI->def, DefIdx) ||
        (VNI->def != DefIdx &&
         (!VNI->def.isEarlyClobber() || !DefIdx.isRegister()))) {
      report("Inconsistent valno->def", MO, MONum);
      report_context_liverange(LR);
      report_context_vreg_regunit(VRegOrUnit);
      if (LaneMask.any())
        report_context_lanemask(LaneMask);
      report_context(*VNI);
      report_context(DefIdx);
    }
  } else {
    report("No live segment at def", MO, MONum);
    report_context_liverange(LR);
    report_context_vreg_regunit(VRegOrUnit);
    if (LaneMask.any())
      report_context_lanemask(LaneMask);
    report_context(DefIdx);
  }
  // Check that, if the dead def flag is present, LiveInts agree.
  if (MO->isDead()) {
    LiveQueryResult LRQ = LR.Query(DefIdx);
    if (!LRQ.isDeadDef()) {
      assert(VRegOrUnit.isVirtual() && "Expecting a virtual register.");
      // A dead subreg def only tells us that the specific subreg is dead.
      // There could be other non-dead defs of other subregs, or we could have
      // other parts of the register being live through the instruction. So
      // unless we are checking liveness for a subrange it is ok for the live
      // range to continue, given that we have a dead def of a subregister.
      if (SubRangeCheck || MO->getSubReg() == 0) {
        report("Live range continues after dead def flag", MO, MONum);
        report_context_liverange(LR);
        report_context_vreg_regunit(VRegOrUnit);
        if (LaneMask.any())
          report_context_lanemask(LaneMask);
      }
    }
  }
}
void MachineVerifier::checkLiveness(const MachineOperand *MO, unsigned MONum) {
  const MachineInstr *MI = MO->getParent();
  const Register Reg = MO->getReg();
  const unsigned SubRegIdx = MO->getSubReg();

  const LiveInterval *LI = nullptr;
  if (LiveInts && Reg.isVirtual()) {
    if (LiveInts->hasInterval(Reg)) {
      LI = &LiveInts->getInterval(Reg);
      if (SubRegIdx != 0 && (MO->isDef() || !MO->isUndef()) && !LI->empty() &&
          !LI->hasSubRanges() && MRI->shouldTrackSubRegLiveness(Reg))
        report("Live interval for subreg operand has no subranges", MO, MONum);
    } else {
      report("Virtual register has no live interval", MO, MONum);
    }
  }
  // Both use and def operands can read a register.
  if (MO->readsReg()) {
    if (MO->isKill())
      addRegWithSubRegs(regsKilled, Reg);

    // Check that LiveVars knows this kill (unless we are inside a bundle, in
    // which case we have already checked that LiveVars knows any kills on the
    // bundle header instead).
    if (LiveVars && Reg.isVirtual() && MO->isKill() &&
        !MI->isBundledWithPred()) {
      LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
      if (!is_contained(VI.Kills, MI))
        report("Kill missing from LiveVariables", MO, MONum);
    }

    // Check LiveInts liveness and kill.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      SlotIndex UseIdx;
      if (MI->isPHI()) {
        // PHI use occurs on the edge, so check for live out here instead.
        UseIdx = LiveInts->getMBBEndIdx(
            MI->getOperand(MONum + 1).getMBB()).getPrevSlot();
      } else {
        UseIdx = LiveInts->getInstructionIndex(*MI);
      }
      // Check the cached regunit intervals.
      if (Reg.isPhysical() && !isReserved(Reg)) {
        for (MCRegUnit Unit : TRI->regunits(Reg.asMCReg())) {
          if (MRI->isReservedRegUnit(Unit))
            continue;
          if (const LiveRange *LR = LiveInts->getCachedRegUnit(Unit))
            checkLivenessAtUse(MO, MONum, UseIdx, *LR, Unit);
        }
      }

      if (Reg.isVirtual()) {
        // This is a virtual register interval.
        checkLivenessAtUse(MO, MONum, UseIdx, *LI, Reg);

        if (LI->hasSubRanges() && !MO->isDef()) {
          LaneBitmask MOMask = SubRegIdx != 0
                                   ? TRI->getSubRegIndexLaneMask(SubRegIdx)
                                   : MRI->getMaxLaneMaskForVReg(Reg);
          LaneBitmask LiveInMask;
          for (const LiveInterval::SubRange &SR : LI->subranges()) {
            if ((MOMask & SR.LaneMask).none())
              continue;
            checkLivenessAtUse(MO, MONum, UseIdx, SR, Reg, SR.LaneMask);
            LiveQueryResult LRQ = SR.Query(UseIdx);
            if (LRQ.valueIn() || (MI->isPHI() && LRQ.valueOut()))
              LiveInMask |= SR.LaneMask;
          }
          // At least parts of the register have to be live at the use.
          if ((LiveInMask & MOMask).none()) {
            report("No live subrange at use", MO, MONum);
            report_context(*LI);
            report_context(UseIdx);
          }
          // For PHIs all lanes should be live
          if (MI->isPHI() && LiveInMask != MOMask) {
            report("Not all lanes of PHI source live at use", MO, MONum);
            report_context(*LI);
            report_context(UseIdx);
          }
        }
      }
    }

    // Use of a dead register.
    if (!regsLive.count(Reg)) {
      if (Reg.isPhysical()) {
        // Reserved registers may be used even when 'dead'.
        bool Bad = !isReserved(Reg);
        // We are fine if just any subregister has a defined value.
        if (Bad) {
          for (const MCPhysReg &SubReg : TRI->subregs(Reg)) {
            if (regsLive.count(SubReg)) {
              Bad = false;
              break;
            }
          }
        }
        // If there is an additional implicit-use of a super register we stop
        // here. By definition we are fine if the super register is not
        // (completely) dead; if the complete super register is dead we will
        // get a report for its operand.
        if (Bad) {
          for (const MachineOperand &MOP : MI->uses()) {
            if (!MOP.isReg() || !MOP.isImplicit())
              continue;

            if (!MOP.getReg().isPhysical())
              continue;

            if (llvm::is_contained(TRI->subregs(MOP.getReg()), Reg))
              Bad = false;
          }
        }
        if (Bad)
          report("Using an undefined physical register", MO, MONum);
      } else if (MRI->def_empty(Reg)) {
        report("Reading virtual register without a def", MO, MONum);
      } else {
        BBInfo &MInfo = MBBInfoMap[MI->getParent()];
        // We don't know which virtual registers are live in, so only complain
        // if vreg was killed in this MBB. Otherwise keep track of vregs that
        // must be live in. PHI instructions are handled separately.
        if (MInfo.regsKilled.count(Reg))
          report("Using a killed virtual register", MO, MONum);
        else if (!MI->isPHI())
          MInfo.vregsLiveIn.insert(std::make_pair(Reg, MI));
      }
    }
  }

  if (MO->isDef()) {
    // Register defined.
    // TODO: verify that earlyclobber ops are not used.
    if (MO->isDead())
      addRegWithSubRegs(regsDead, Reg);
    else
      addRegWithSubRegs(regsDefined, Reg);

    // Verify SSA form.
    if (MRI->isSSA() && Reg.isVirtual() &&
        std::next(MRI->def_begin(Reg)) != MRI->def_end())
      report("Multiple virtual register defs in SSA form", MO, MONum);

    // Check LiveInts for a live segment, but only for virtual registers.
    if (LiveInts && !LiveInts->isNotInMIMap(*MI)) {
      SlotIndex DefIdx = LiveInts->getInstructionIndex(*MI);
      DefIdx = DefIdx.getRegSlot(MO->isEarlyClobber());

      if (Reg.isVirtual()) {
        checkLivenessAtDef(MO, MONum, DefIdx, *LI, Reg);

        if (LI->hasSubRanges()) {
          LaneBitmask MOMask = SubRegIdx != 0
                                   ? TRI->getSubRegIndexLaneMask(SubRegIdx)
                                   : MRI->getMaxLaneMaskForVReg(Reg);
          for (const LiveInterval::SubRange &SR : LI->subranges()) {
            if ((SR.LaneMask & MOMask).none())
              continue;
            checkLivenessAtDef(MO, MONum, DefIdx, SR, Reg, true, SR.LaneMask);
          }
        }
      }
    }
  }
}
// This function gets called after visiting all instructions in a bundle. The
// argument points to the bundle header.
// Normal stand-alone instructions are also considered 'bundles', and this
// function is called for all of them.
void MachineVerifier::visitMachineBundleAfter(const MachineInstr *MI) {
  BBInfo &MInfo = MBBInfoMap[MI->getParent()];
  set_union(MInfo.regsKilled, regsKilled);
  set_subtract(regsLive, regsKilled); regsKilled.clear();
  // Kill any masked registers.
  while (!regMasks.empty()) {
    const uint32_t *Mask = regMasks.pop_back_val();
    for (Register Reg : regsLive)
      if (Reg.isPhysical() &&
          MachineOperand::clobbersPhysReg(Mask, Reg.asMCReg()))
        regsDead.push_back(Reg);
  }
  set_subtract(regsLive, regsDead);   regsDead.clear();
  set_union(regsLive, regsDefined);   regsDefined.clear();
}

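// In dataflow terms, the transfer above computes, per bundle:
//   regsLive := (regsLive \ (regsKilled U regsDead)) U regsDefined
// (a sketch of the intended semantics, not additional checking).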
void
MachineVerifier::visitMachineBasicBlockAfter(const MachineBasicBlock *MBB) {
  MBBInfoMap[MBB].regsLiveOut = regsLive;
  regsLive.clear();

  if (Indexes) {
    SlotIndex stop = Indexes->getMBBEndIdx(MBB);
    if (!(stop > lastIndex)) {
      report("Block ends before last instruction index", MBB);
      errs() << "Block ends at " << stop
             << " last instruction was at " << lastIndex << '\n';
    }
    lastIndex = stop;
  }
}
namespace {
// This implements a set of registers that serves as a filter: can filter other
// sets by passing through elements not in the filter and blocking those that
// are. Any filter implicitly includes the full set of physical registers upon
// creation, thus filtering them all out. The filter itself as a set only
// grows, and needs to be as efficient as possible.
struct VRegFilter {
  // Add elements to the filter itself. \pre Input set \p FromRegSet must have
  // no duplicates. Both virtual and physical registers are fine.
  template <typename RegSetT> void add(const RegSetT &FromRegSet) {
    SmallVector<Register, 0> VRegsBuffer;
    filterAndAdd(FromRegSet, VRegsBuffer);
  }
  // Filter \p FromRegSet through the filter and append passed elements into \p
  // ToVRegs. All elements appended are then added to the filter itself.
  // \returns true if anything changed.
  template <typename RegSetT>
  bool filterAndAdd(const RegSetT &FromRegSet,
                    SmallVectorImpl<Register> &ToVRegs) {
    unsigned SparseUniverse = Sparse.size();
    unsigned NewSparseUniverse = SparseUniverse;
    unsigned NewDenseSize = Dense.size();
    size_t Begin = ToVRegs.size();
    for (Register Reg : FromRegSet) {
      if (!Reg.isVirtual())
        continue;
      unsigned Index = Register::virtReg2Index(Reg);
      if (Index < SparseUniverseMax) {
        if (Index < SparseUniverse && Sparse.test(Index))
          continue;
        NewSparseUniverse = std::max(NewSparseUniverse, Index + 1);
      } else {
        if (Dense.count(Reg))
          continue;
        ++NewDenseSize;
      }
      ToVRegs.push_back(Reg);
    }
    size_t End = ToVRegs.size();
    if (Begin == End)
      return false;
    // Reserving space in sets once performs better than doing so continuously
    // and pays easily for double look-ups (even in Dense with
    // SparseUniverseMax tuned all the way down) and double iteration (the
    // second one is over a SmallVector, which is a lot cheaper compared to
    // DenseSet or BitVector).
    Sparse.resize(NewSparseUniverse);
    Dense.reserve(NewDenseSize);
    for (unsigned I = Begin; I < End; ++I) {
      Register Reg = ToVRegs[I];
      unsigned Index = Register::virtReg2Index(Reg);
      if (Index < SparseUniverseMax)
        Sparse.set(Index);
      else
        Dense.insert(Reg);
    }
    return true;
  }

private:
  static constexpr unsigned SparseUniverseMax = 10 * 1024 * 8;
  // VRegs indexed within SparseUniverseMax are tracked by Sparse, those beyond
  // are tracked by Dense. The only purpose of the threshold and the Dense set
  // is to have a reasonably growing memory usage in pathological cases (large
  // number of very sparse VRegFilter instances live at the same time). In
  // practice even in the worst-by-execution time cases having all elements
  // tracked by Sparse (very large SparseUniverseMax scenario) tends to be more
  // space efficient than if tracked by Dense. The threshold is set to keep the
  // worst-case memory usage within 2x of figures determined empirically for
  // "all Dense" scenario in such worst-by-execution-time cases.
  BitVector Sparse;
  DenseSet<unsigned> Dense;
};
// Implements both a transfer function and a (binary, in-place) join operator
// for a dataflow over register sets with set union join and filtering transfer
// (out_b = in_b \ filter_b). filter_b is expected to be set up ahead of time.
// Maintains out_b as its state, allowing for O(n) iteration over it at any
// time, where n is the size of the set (as opposed to O(U) where U is the
// universe). filter_b implicitly contains all physical registers at all times.
class FilteringVRegSet {
  VRegFilter Filter;
  SmallVector<Register, 0> VRegs;

public:
  // Set up the filter_b. \pre Input register set \p RS must have no
  // duplicates. Both virtual and physical registers are fine.
  template <typename RegSetT> void addToFilter(const RegSetT &RS) {
    Filter.add(RS);
  }
  // Passes \p RS through the filter_b (transfer function) and adds what's left
  // to itself (out_b).
  template <typename RegSetT> bool add(const RegSetT &RS) {
    // Double-duty the Filter: to maintain VRegs a set (and the join operation
    // a set union) just add everything being added here to the Filter as well.
    return Filter.filterAndAdd(RS, VRegs);
  }
  using const_iterator = decltype(VRegs)::const_iterator;
  const_iterator begin() const { return VRegs.begin(); }
  const_iterator end() const { return VRegs.end(); }
  size_t size() const { return VRegs.size(); }
};
} // namespace
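// Usage sketch for the two classes above (hypothetical registers): after
// addToFilter({%0}), a call to add({%0, %1}) appends only %1 to the set,
// since %0 is blocked by the filter and physical registers always are.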
// Calculate the largest possible vregsPassed sets. These are the registers
// that can pass through an MBB live, but may not be live every time. It is
// assumed that all vregsPassed sets are empty before the call.
void MachineVerifier::calcRegsPassed() {
  if (MF->empty())
    // ReversePostOrderTraversal doesn't handle empty functions.
    return;

  for (const MachineBasicBlock *MB :
       ReversePostOrderTraversal<const MachineFunction *>(MF)) {
    FilteringVRegSet VRegs;
    BBInfo &Info = MBBInfoMap[MB];
    assert(Info.reachable);

    VRegs.addToFilter(Info.regsKilled);
    VRegs.addToFilter(Info.regsLiveOut);
    for (const MachineBasicBlock *Pred : MB->predecessors()) {
      const BBInfo &PredInfo = MBBInfoMap[Pred];
      if (!PredInfo.reachable)
        continue;

      VRegs.add(PredInfo.regsLiveOut);
      VRegs.add(PredInfo.vregsPassed);
    }
    Info.vregsPassed.reserve(VRegs.size());
    Info.vregsPassed.insert(VRegs.begin(), VRegs.end());
  }
}
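// The loop above is a forward dataflow step over the RPO; in equation form,
// for each block B (a sketch of the intent, not extra verification):
//   vregsPassed(B) = U_{P in preds(B)} (regsLiveOut(P) U vregsPassed(P))
//                    \ (regsKilled(B) U regsLiveOut(B))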
// Calculate the set of virtual registers that must be passed through each
// basic block in order to satisfy the requirements of successor blocks. This
// is very similar to calcRegsPassed, only backwards.
void MachineVerifier::calcRegsRequired() {
  // First push live-in regs to predecessors' vregsRequired.
  SmallPtrSet<const MachineBasicBlock *, 8> todo;
  for (const auto &MBB : *MF) {
    BBInfo &MInfo = MBBInfoMap[&MBB];
    for (const MachineBasicBlock *Pred : MBB.predecessors()) {
      BBInfo &PInfo = MBBInfoMap[Pred];
      if (PInfo.addRequired(MInfo.vregsLiveIn))
        todo.insert(Pred);
    }

    // Handle the PHI node.
    for (const MachineInstr &MI : MBB.phis()) {
      for (unsigned i = 1, e = MI.getNumOperands(); i != e; i += 2) {
        // Skip those Operands which are undef regs or not regs.
        if (!MI.getOperand(i).isReg() || !MI.getOperand(i).readsReg())
          continue;

        // Get register and predecessor for one PHI edge.
        Register Reg = MI.getOperand(i).getReg();
        const MachineBasicBlock *Pred = MI.getOperand(i + 1).getMBB();

        BBInfo &PInfo = MBBInfoMap[Pred];
        if (PInfo.addRequired(Reg))
          todo.insert(Pred);
      }
    }
  }

  // Iteratively push vregsRequired to predecessors. This will converge to the
  // same final state regardless of DenseSet iteration order.
  while (!todo.empty()) {
    const MachineBasicBlock *MBB = *todo.begin();
    todo.erase(MBB);
    BBInfo &MInfo = MBBInfoMap[MBB];
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      if (Pred == MBB)
        continue;
      BBInfo &SInfo = MBBInfoMap[Pred];
      if (SInfo.addRequired(MInfo.vregsRequired))
        todo.insert(Pred);
    }
  }
}
// Check PHI instructions at the beginning of MBB. It is assumed that
// calcRegsPassed has been run so BBInfo::isLiveOut is valid.
void MachineVerifier::checkPHIOps(const MachineBasicBlock &MBB) {
  BBInfo &MInfo = MBBInfoMap[&MBB];

  SmallPtrSet<const MachineBasicBlock *, 8> seen;
  for (const MachineInstr &Phi : MBB) {
    if (!Phi.isPHI())
      break;
    seen.clear();

    const MachineOperand &MODef = Phi.getOperand(0);
    if (!MODef.isReg() || !MODef.isDef()) {
      report("Expected first PHI operand to be a register def", &MODef, 0);
      continue;
    }
    if (MODef.isTied() || MODef.isImplicit() || MODef.isInternalRead() ||
        MODef.isEarlyClobber() || MODef.isDebug())
      report("Unexpected flag on PHI operand", &MODef, 0);
    Register DefReg = MODef.getReg();
    if (!DefReg.isVirtual())
      report("Expected first PHI operand to be a virtual register", &MODef, 0);

    for (unsigned I = 1, E = Phi.getNumOperands(); I != E; I += 2) {
      const MachineOperand &MO0 = Phi.getOperand(I);
      if (!MO0.isReg()) {
        report("Expected PHI operand to be a register", &MO0, I);
        continue;
      }
      if (MO0.isImplicit() || MO0.isInternalRead() || MO0.isEarlyClobber() ||
          MO0.isDebug() || MO0.isTied())
        report("Unexpected flag on PHI operand", &MO0, I);

      const MachineOperand &MO1 = Phi.getOperand(I + 1);
      if (!MO1.isMBB()) {
        report("Expected PHI operand to be a basic block", &MO1, I + 1);
        continue;
      }

      const MachineBasicBlock &Pre = *MO1.getMBB();
      if (!Pre.isSuccessor(&MBB)) {
        report("PHI input is not a predecessor block", &MO1, I + 1);
        continue;
      }

      if (MInfo.reachable) {
        seen.insert(&Pre);
        BBInfo &PrInfo = MBBInfoMap[&Pre];
        if (!MO0.isUndef() && PrInfo.reachable &&
            !PrInfo.isLiveOut(MO0.getReg()))
          report("PHI operand is not live-out from predecessor", &MO0, I);
      }
    }

    // Did we see all predecessors?
    if (MInfo.reachable) {
      for (MachineBasicBlock *Pred : MBB.predecessors()) {
        if (!seen.count(Pred)) {
          report("Missing PHI operand", &Phi);
          errs() << printMBBReference(*Pred)
                 << " is a predecessor according to the CFG.\n";
        }
      }
    }
  }
}
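// Illustration (hypothetical MIR, names invented): for a block with
// predecessors %bb.1 and %bb.2, a well-formed PHI lists one value/block pair
// per predecessor, and each value must be live-out of its block:
//   %dst:gpr = PHI %a, %bb.1, %b, %bb.2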
void MachineVerifier::visitMachineFunctionAfter() {
  calcRegsPassed();

  for (const MachineBasicBlock &MBB : *MF)
    checkPHIOps(MBB);

  // Now check liveness info if available
  calcRegsRequired();

  // Check for killed virtual registers that should be live out.
  for (const auto &MBB : *MF) {
    BBInfo &MInfo = MBBInfoMap[&MBB];
    for (Register VReg : MInfo.vregsRequired)
      if (MInfo.regsKilled.count(VReg)) {
        report("Virtual register killed in block, but needed live out.", &MBB);
        errs() << "Virtual register " << printReg(VReg)
               << " is used after the block.\n";
      }
  }

  if (!MF->empty()) {
    BBInfo &MInfo = MBBInfoMap[&MF->front()];
    for (Register VReg : MInfo.vregsRequired) {
      report("Virtual register defs don't dominate all uses.", MF);
      report_context_vreg(VReg);
    }
  }

  if (LiveVars)
    verifyLiveVariables();
  if (LiveInts)
    verifyLiveIntervals();

  // Check live-in list of each MBB. If a register is live into MBB, check
  // that the register is in regsLiveOut of each predecessor block. Since
  // this must come from a definition in the predecessor or its live-in
  // list, this will catch a live-through case where the predecessor does not
  // have the register in its live-in list. This currently only checks
  // registers that have no aliases, are not allocatable and are not
  // reserved, which could mean a condition code register for instance.
  if (MRI->tracksLiveness())
    for (const auto &MBB : *MF)
      for (MachineBasicBlock::RegisterMaskPair P : MBB.liveins()) {
        MCPhysReg LiveInReg = P.PhysReg;
        bool hasAliases = MCRegAliasIterator(LiveInReg, TRI, false).isValid();
        if (hasAliases || isAllocatable(LiveInReg) || isReserved(LiveInReg))
          continue;
        for (const MachineBasicBlock *Pred : MBB.predecessors()) {
          BBInfo &PInfo = MBBInfoMap[Pred];
          if (!PInfo.regsLiveOut.count(LiveInReg)) {
            report("Live in register not found to be live out from "
                   "predecessor.",
                   &MBB);
            errs() << TRI->getName(LiveInReg)
                   << " not found to be live out from "
                   << printMBBReference(*Pred) << "\n";
          }
        }
      }

  for (auto CSInfo : MF->getCallSitesInfo())
    if (!CSInfo.first->isCall())
      report("Call site info referencing instruction that is not call", MF);

  // If there's debug-info, check that we don't have any duplicate value
  // tracking numbers.
  if (MF->getFunction().getSubprogram()) {
    DenseSet<unsigned> SeenNumbers;
    for (const auto &MBB : *MF) {
      for (const auto &MI : MBB) {
        if (auto Num = MI.peekDebugInstrNum()) {
          auto Result = SeenNumbers.insert((unsigned)Num);
          if (!Result.second)
            report("Instruction has a duplicated value tracking number", &MI);
        }
      }
    }
  }
}
void MachineVerifier::verifyLiveVariables() {
  assert(LiveVars && "Don't call verifyLiveVariables without LiveVars");
  for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    LiveVariables::VarInfo &VI = LiveVars->getVarInfo(Reg);
    for (const auto &MBB : *MF) {
      BBInfo &MInfo = MBBInfoMap[&MBB];

      // Our vregsRequired should be identical to LiveVariables' AliveBlocks
      if (MInfo.vregsRequired.count(Reg)) {
        if (!VI.AliveBlocks.test(MBB.getNumber())) {
          report("LiveVariables: Block missing from AliveBlocks", &MBB);
          errs() << "Virtual register " << printReg(Reg)
                 << " must be live through the block.\n";
        }
      } else {
        if (VI.AliveBlocks.test(MBB.getNumber())) {
          report("LiveVariables: Block should not be in AliveBlocks", &MBB);
          errs() << "Virtual register " << printReg(Reg)
                 << " is not needed live through the block.\n";
        }
      }
    }
  }
}
void MachineVerifier::verifyLiveIntervals() {
  assert(LiveInts && "Don't call verifyLiveIntervals without LiveInts");
  for (unsigned I = 0, E = MRI->getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);

    // Spilling and splitting may leave unused registers around. Skip them.
    if (MRI->reg_nodbg_empty(Reg))
      continue;

    if (!LiveInts->hasInterval(Reg)) {
      report("Missing live interval for virtual register", MF);
      errs() << printReg(Reg, TRI) << " still has defs or uses\n";
      continue;
    }

    const LiveInterval &LI = LiveInts->getInterval(Reg);
    assert(Reg == LI.reg() && "Invalid reg to interval mapping");
    verifyLiveInterval(LI);
  }

  // Verify all the cached regunit intervals.
  for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
    if (const LiveRange *LR = LiveInts->getCachedRegUnit(i))
      verifyLiveRange(*LR, i);
}
void MachineVerifier::verifyLiveRangeValue(const LiveRange &LR,
                                           const VNInfo *VNI, Register Reg,
                                           LaneBitmask LaneMask) {
  if (VNI->isUnused())
    return;

  const VNInfo *DefVNI = LR.getVNInfoAt(VNI->def);

  if (!DefVNI) {
    report("Value not live at VNInfo def and not marked unused", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  if (DefVNI != VNI) {
    report("Live segment at def has different VNInfo", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(VNI->def);
  if (!MBB) {
    report("Invalid VNInfo definition index", MF);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  if (VNI->isPHIDef()) {
    if (VNI->def != LiveInts->getMBBStartIdx(MBB)) {
      report("PHIDef VNInfo is not defined at MBB start", MBB);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }
    return;
  }

  // Non-PHI def.
  const MachineInstr *MI = LiveInts->getInstructionFromIndex(VNI->def);
  if (!MI) {
    report("No instruction at VNInfo def index", MBB);
    report_context(LR, Reg, LaneMask);
    report_context(*VNI);
    return;
  }

  if (Reg != 0) {
    bool hasDef = false;
    bool isEarlyClobber = false;
    for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
      if (!MOI->isReg() || !MOI->isDef())
        continue;
      if (Reg.isVirtual()) {
        if (MOI->getReg() != Reg)
          continue;
      } else {
        if (!MOI->getReg().isPhysical() ||
            !TRI->hasRegUnit(MOI->getReg(), Reg))
          continue;
      }
      if (LaneMask.any() &&
          (TRI->getSubRegIndexLaneMask(MOI->getSubReg()) & LaneMask).none())
        continue;
      hasDef = true;
      if (MOI->isEarlyClobber())
        isEarlyClobber = true;
    }

    if (!hasDef) {
      report("Defining instruction does not modify register", MI);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }

    // Early clobber defs begin at USE slots, but other defs must begin at
    // DEF slots.
    if (isEarlyClobber) {
      if (!VNI->def.isEarlyClobber()) {
        report("Early clobber def must be at an early-clobber slot", MBB);
        report_context(LR, Reg, LaneMask);
        report_context(*VNI);
      }
    } else if (!VNI->def.isRegister()) {
      report("Non-PHI, non-early clobber def must be at a register slot", MBB);
      report_context(LR, Reg, LaneMask);
      report_context(*VNI);
    }
  }
}
void MachineVerifier::verifyLiveRangeSegment(const LiveRange &LR,
                                             const LiveRange::const_iterator I,
                                             Register Reg,
                                             LaneBitmask LaneMask) {
  const LiveRange::Segment &S = *I;
  const VNInfo *VNI = S.valno;
  assert(VNI && "Live segment has no valno");

  if (VNI->id >= LR.getNumValNums() || VNI != LR.getValNumInfo(VNI->id)) {
    report("Foreign valno in live segment", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    report_context(*VNI);
  }

  if (VNI->isUnused()) {
    report("Live segment valno is marked unused", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
  }

  const MachineBasicBlock *MBB = LiveInts->getMBBFromIndex(S.start);
  if (!MBB) {
    report("Bad start of live segment, no basic block", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    return;
  }
  SlotIndex MBBStartIdx = LiveInts->getMBBStartIdx(MBB);
  if (S.start != MBBStartIdx && S.start != VNI->def) {
    report("Live segment must begin at MBB entry or valno def", MBB);
    report_context(LR, Reg, LaneMask);
    report_context(S);
  }

  const MachineBasicBlock *EndMBB =
      LiveInts->getMBBFromIndex(S.end.getPrevSlot());
  if (!EndMBB) {
    report("Bad end of live segment, no basic block", MF);
    report_context(LR, Reg, LaneMask);
    report_context(S);
    return;
  }

  // Checks for non-live-out segments.
  if (S.end != LiveInts->getMBBEndIdx(EndMBB)) {
    // RegUnit intervals are allowed to have dead phis.
    if (!Reg.isVirtual() && VNI->isPHIDef() && S.start == VNI->def &&
        S.end == VNI->def.getDeadSlot())
      return;

    // The live segment is ending inside EndMBB
    const MachineInstr *MI =
        LiveInts->getInstructionFromIndex(S.end.getPrevSlot());
    if (!MI) {
      report("Live segment doesn't end at a valid instruction", EndMBB);
      report_context(LR, Reg, LaneMask);
      report_context(S);
      return;
    }

    // The block slot must refer to a basic block boundary.
    if (S.end.isBlock()) {
      report("Live segment ends at B slot of an instruction", EndMBB);
      report_context(LR, Reg, LaneMask);
      report_context(S);
    }

    if (S.end.isDead()) {
      // Segment ends on the dead slot.
      // That means there must be a dead def.
      if (!SlotIndex::isSameInstr(S.start, S.end)) {
        report("Live segment ending at dead slot spans instructions", EndMBB);
        report_context(LR, Reg, LaneMask);
        report_context(S);
      }
    }

    // After tied operands are rewritten, a live segment can only end at an
    // early-clobber slot if it is being redefined by an early-clobber def.
    // TODO: Before tied operands are rewritten, a live segment can only end at
    // an early-clobber slot if the last use is tied to an early-clobber def.
    if (MF->getProperties().hasProperty(
            MachineFunctionProperties::Property::TiedOpsRewritten) &&
        S.end.isEarlyClobber()) {
      if (I + 1 == LR.end() || (I + 1)->start != S.end) {
        report("Live segment ending at early clobber slot must be "
               "redefined by an EC def in the same instruction",
               EndMBB);
        report_context(LR, Reg, LaneMask);
        report_context(S);
      }
    }
    // The following checks only apply to virtual registers. Physreg liveness
    // is too weird to check.
    if (Reg.isVirtual()) {
      // A live segment can end with either a redefinition, a kill flag on a
      // use, or a dead flag on a def.
      bool hasRead = false;
      bool hasSubRegDef = false;
      bool hasDeadDef = false;
      for (ConstMIBundleOperands MOI(*MI); MOI.isValid(); ++MOI) {
        if (!MOI->isReg() || MOI->getReg() != Reg)
          continue;
        unsigned Sub = MOI->getSubReg();
        LaneBitmask SLM =
            Sub != 0 ? TRI->getSubRegIndexLaneMask(Sub) : LaneBitmask::getAll();
        if (MOI->isDef()) {
          if (Sub != 0) {
            hasSubRegDef = true;
            // An operand %0:sub0 reads %0:sub1..n. Invert the lane
            // mask for subregister defs. Read-undef defs will be handled by
            // readsReg below.
            SLM = ~SLM;
          }
          if (MOI->isDead())
            hasDeadDef = true;
        }
        if (LaneMask.any() && (LaneMask & SLM).none())
          continue;
        if (MOI->readsReg())
          hasRead = true;
      }
      if (S.end.isDead()) {
        // Make sure that the corresponding machine operand for a "dead" live
        // range has the dead flag. We cannot perform this check for subregister
        // liveranges as partially dead values are allowed.
        if (LaneMask.none() && !hasDeadDef) {
          report(
              "Instruction ending live segment on dead slot has no dead flag",
              MI);
          report_context(LR, Reg, LaneMask);
          report_context(S);
        }
      } else {
        if (!hasRead) {
          // When tracking subregister liveness, the main range must start new
          // values on partial register writes, even if there is no read.
          if (!MRI->shouldTrackSubRegLiveness(Reg) || LaneMask.any() ||
              !hasSubRegDef) {
            report("Instruction ending live segment doesn't read the register",
                   MI);
            report_context(LR, Reg, LaneMask);
            report_context(S);
          }
        }
      }
    }
  }
  // Now check all the basic blocks in this live segment.
  MachineFunction::const_iterator MFI = MBB->getIterator();
  // Is this live segment the beginning of a non-PHIDef VN?
  if (S.start == VNI->def && !VNI->isPHIDef()) {
    // Not live-in to any blocks.
    if (MBB == EndMBB)
      return;
    // Skip this block.
    ++MFI;
  }

  SmallVector<SlotIndex, 4> Undefs;
  if (LaneMask.any()) {
    LiveInterval &OwnerLI = LiveInts->getInterval(Reg);
    OwnerLI.computeSubRangeUndefs(Undefs, LaneMask, *MRI, *Indexes);
  }

  while (true) {
    assert(LiveInts->isLiveInToMBB(LR, &*MFI));
    // We don't know how to track physregs into a landing pad.
    if (!Reg.isVirtual() && MFI->isEHPad()) {
      if (&*MFI == EndMBB)
        break;
      ++MFI;
      continue;
    }

    // Is VNI a PHI-def in the current block?
    bool IsPHI = VNI->isPHIDef() &&
                 VNI->def == LiveInts->getMBBStartIdx(&*MFI);

    // Check that VNI is live-out of all predecessors.
    for (const MachineBasicBlock *Pred : MFI->predecessors()) {
      SlotIndex PEnd = LiveInts->getMBBEndIdx(Pred);
      // Predecessor of landing pad live-out on last call.
      if (MFI->isEHPad()) {
        for (const MachineInstr &MI : llvm::reverse(*Pred)) {
          if (MI.isCall()) {
            PEnd = Indexes->getInstructionIndex(MI).getBoundaryIndex();
            break;
          }
        }
      }
      const VNInfo *PVNI = LR.getVNInfoBefore(PEnd);

      // All predecessors must have a live-out value. However for a phi
      // instruction with subregister intervals
      // only one of the subregisters (not necessarily the current one) needs
      // to be live out.
      if (!PVNI && (LaneMask.none() || !IsPHI)) {
        if (LiveRangeCalc::isJointlyDominated(Pred, Undefs, *Indexes))
          continue;
        report("Register not marked live out of predecessor", Pred);
        report_context(LR, Reg, LaneMask);
        report_context(*VNI);
        errs() << " live into " << printMBBReference(*MFI) << '@'
               << LiveInts->getMBBStartIdx(&*MFI) << ", not live before "
               << PEnd << '\n';
        continue;
      }

      // Only PHI-defs can take different predecessor values.
      if (!IsPHI && PVNI != VNI) {
        report("Different value live out of predecessor", Pred);
        report_context(LR, Reg, LaneMask);
        errs() << "Valno #" << PVNI->id << " live out of "
               << printMBBReference(*Pred) << '@' << PEnd << "\nValno #"
               << VNI->id << " live into " << printMBBReference(*MFI) << '@'
               << LiveInts->getMBBStartIdx(&*MFI) << '\n';
      }
    }
    if (&*MFI == EndMBB)
      break;
    ++MFI;
  }
}
void MachineVerifier::verifyLiveRange(const LiveRange &LR, Register Reg,
                                      LaneBitmask LaneMask) {
  for (const VNInfo *VNI : LR.valnos)
    verifyLiveRangeValue(LR, VNI, Reg, LaneMask);

  for (LiveRange::const_iterator I = LR.begin(), E = LR.end(); I != E; ++I)
    verifyLiveRangeSegment(LR, I, Reg, LaneMask);
}
void MachineVerifier::verifyLiveInterval(const LiveInterval &LI) {
  Register Reg = LI.reg();
  assert(Reg.isVirtual());
  verifyLiveRange(LI, Reg);

  if (LI.hasSubRanges()) {
    LaneBitmask Mask;
    LaneBitmask MaxMask = MRI->getMaxLaneMaskForVReg(Reg);
    for (const LiveInterval::SubRange &SR : LI.subranges()) {
      if ((Mask & SR.LaneMask).any()) {
        report("Lane masks of sub ranges overlap in live interval", MF);
        report_context(LI);
      }
      if ((SR.LaneMask & ~MaxMask).any()) {
        report("Subrange lanemask is invalid", MF);
        report_context(LI);
      }
      if (SR.empty()) {
        report("Subrange must not be empty", MF);
        report_context(SR, LI.reg(), SR.LaneMask);
      }
      Mask |= SR.LaneMask;
      verifyLiveRange(SR, LI.reg(), SR.LaneMask);
      if (!LI.covers(SR)) {
        report("A Subrange is not covered by the main range", MF);
        report_context(LI);
      }
    }
  }

  // Check the LI only has one connected component.
  ConnectedVNInfoEqClasses ConEQ(*LiveInts);
  unsigned NumComp = ConEQ.Classify(LI);
  if (NumComp > 1) {
    report("Multiple connected components in live interval", MF);
    report_context(LI);
    for (unsigned comp = 0; comp != NumComp; ++comp) {
      errs() << comp << ": valnos";
      for (const VNInfo *I : LI.valnos)
        if (comp == ConEQ.getEqClass(I))
          errs() << ' ' << I->id;
      errs() << '\n';
    }
  }
}
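
// Illustrative example (hedged; the lane mask values are target-dependent
// assumptions): a 64-bit virtual register with two 32-bit subregisters might
// carry subranges with lane masks 0x1 and 0x2. The checks above reject
// overlapping masks, masks outside getMaxLaneMaskForVReg, empty subranges,
// and subranges not covered by the main range.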
namespace {

  // FrameSetup and FrameDestroy can have zero adjustment, so a single integer
  // cannot tell whether a zero value came from a FrameSetup or a FrameDestroy.
  // We therefore use a bool plus an integer to capture the stack state.
  struct StackStateOfBB {
    StackStateOfBB() = default;
    StackStateOfBB(int EntryVal, int ExitVal, bool EntrySetup, bool ExitSetup) :
      EntryValue(EntryVal), ExitValue(ExitVal), EntryIsSetup(EntrySetup),
      ExitIsSetup(ExitSetup) {}

    // Can be negative, which means we are setting up a frame.
    int EntryValue = 0;
    int ExitValue = 0;
    bool EntryIsSetup = false;
    bool ExitIsSetup = false;
  };

} // end anonymous namespace
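
// Why the bool is needed (illustration; the opcode names are X86 pseudos,
// used here as an assumed example): both "ADJCALLSTACKDOWN64 0, 0, 0"
// (a FrameSetup) and "ADJCALLSTACKUP64 0, 0" (a FrameDestroy) leave the
// running adjustment at zero, so only EntryIsSetup/ExitIsSetup can tell a
// block boundary inside a call sequence from one outside it.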
/// Make sure that, on every path through the CFG, a FrameSetup <n> is always
/// followed by a FrameDestroy <n>, stack adjustments are identical on all CFG
/// edges to a merge point, and the frame is destroyed at the end of a return
/// block.
void MachineVerifier::verifyStackFrame() {
  unsigned FrameSetupOpcode = TII->getCallFrameSetupOpcode();
  unsigned FrameDestroyOpcode = TII->getCallFrameDestroyOpcode();
  if (FrameSetupOpcode == ~0u && FrameDestroyOpcode == ~0u)
    return;
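
  // Note (hedged): targets that define no call-frame pseudo instructions
  // leave both opcodes at the ~0u sentinel, so there is no call frame
  // protocol to verify and the early return above fires.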
  SmallVector<StackStateOfBB, 8> SPState;
  SPState.resize(MF->getNumBlockIDs());
  df_iterator_default_set<const MachineBasicBlock *> Reachable;

  // Visit the MBBs in DFS order.
  for (df_ext_iterator<const MachineFunction *,
                       df_iterator_default_set<const MachineBasicBlock *>>
           DFI = df_ext_begin(MF, Reachable),
           DFE = df_ext_end(MF, Reachable);
       DFI != DFE; ++DFI) {
    const MachineBasicBlock *MBB = *DFI;
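
    // Note (hedged): the df_ext_* iterators record every visited block in
    // Reachable, which is what lets the predecessor/successor consistency
    // checks below ignore unreachable blocks.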
    StackStateOfBB BBState;
    // Check the exit state of the DFS stack predecessor.
    if (DFI.getPathLength() >= 2) {
      const MachineBasicBlock *StackPred = DFI.getPath(DFI.getPathLength() - 2);
      assert(Reachable.count(StackPred) &&
             "DFS stack predecessor is already visited.\n");
      BBState.EntryValue = SPState[StackPred->getNumber()].ExitValue;
      BBState.EntryIsSetup = SPState[StackPred->getNumber()].ExitIsSetup;
      BBState.ExitValue = BBState.EntryValue;
      BBState.ExitIsSetup = BBState.EntryIsSetup;
    }

    if ((int)MBB->getCallFrameSize() != -BBState.EntryValue) {
      report("Call frame size on entry does not match value computed from "
             "predecessor",
             MBB);
      errs() << "Call frame size on entry " << MBB->getCallFrameSize()
             << " does not match value computed from predecessor "
             << -BBState.EntryValue << '\n';
    }
    // Update stack state by checking contents of MBB.
    for (const auto &I : *MBB) {
      if (I.getOpcode() == FrameSetupOpcode) {
        if (BBState.ExitIsSetup)
          report("FrameSetup is after another FrameSetup", &I);
        BBState.ExitValue -= TII->getFrameTotalSize(I);
        BBState.ExitIsSetup = true;
      }

      if (I.getOpcode() == FrameDestroyOpcode) {
        int Size = TII->getFrameTotalSize(I);
        if (!BBState.ExitIsSetup)
          report("FrameDestroy is not after a FrameSetup", &I);
        int AbsSPAdj = BBState.ExitValue < 0 ? -BBState.ExitValue :
                                               BBState.ExitValue;
        if (BBState.ExitIsSetup && AbsSPAdj != Size) {
          report("FrameDestroy <n> is after FrameSetup <m>", &I);
          errs() << "FrameDestroy <" << Size << "> is after FrameSetup <"
                 << AbsSPAdj << ">.\n";
        }
        BBState.ExitValue += Size;
        BBState.ExitIsSetup = false;
      }
    }
    SPState[MBB->getNumber()] = BBState;
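
    // Worked example (hedged, not from the original source): a block whose
    // only frame pseudos are a FrameSetup of 16 followed by a FrameDestroy of
    // 16 takes ExitValue from 0 to -16 and back to 0, with ExitIsSetup
    // toggling to true and back to false; AbsSPAdj (16) matches Size (16), so
    // nothing is reported.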
    // Make sure the exit state of any predecessor is consistent with the entry
    // state.
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      if (Reachable.count(Pred) &&
          (SPState[Pred->getNumber()].ExitValue != BBState.EntryValue ||
           SPState[Pred->getNumber()].ExitIsSetup != BBState.EntryIsSetup)) {
        report("The exit stack state of a predecessor is inconsistent.", MBB);
        errs() << "Predecessor " << printMBBReference(*Pred)
               << " has exit state (" << SPState[Pred->getNumber()].ExitValue
               << ", " << SPState[Pred->getNumber()].ExitIsSetup << "), while "
               << printMBBReference(*MBB) << " has entry state ("
               << BBState.EntryValue << ", " << BBState.EntryIsSetup << ").\n";
      }
    }
    // Make sure the entry state of any successor is consistent with the exit
    // state.
    for (const MachineBasicBlock *Succ : MBB->successors()) {
      if (Reachable.count(Succ) &&
          (SPState[Succ->getNumber()].EntryValue != BBState.ExitValue ||
           SPState[Succ->getNumber()].EntryIsSetup != BBState.ExitIsSetup)) {
        report("The entry stack state of a successor is inconsistent.", MBB);
        errs() << "Successor " << printMBBReference(*Succ)
               << " has entry state (" << SPState[Succ->getNumber()].EntryValue
               << ", " << SPState[Succ->getNumber()].EntryIsSetup << "), while "
               << printMBBReference(*MBB) << " has exit state ("
               << BBState.ExitValue << ", " << BBState.ExitIsSetup << ").\n";
      }
    }
    // Make sure a basic block with return ends with zero stack adjustment.
    if (!MBB->empty() && MBB->back().isReturn()) {
      if (BBState.ExitIsSetup)
        report("A return block ends with a FrameSetup.", MBB);
      if (BBState.ExitValue)
        report("A return block ends with a nonzero stack adjustment.", MBB);