[Alignment][NFC] Migrate Instructions to Align
[llvm-core.git] / lib / IR / Verifier.cpp
1 //===-- Verifier.cpp - Implement the Module Verifier -----------------------==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the function verifier interface, which can be used for some
10 // sanity checking of input to the system.
12 // Note that this does not provide full `Java style' security and verifications,
13 // instead it just tries to ensure that code is well-formed.
15 // * Both of a binary operator's parameters are of the same type
16 // * Verify that the indices of mem access instructions match other operands
17 // * Verify that arithmetic and other things are only performed on first-class
18 // types. Verify, for example, that shifts & logicals only happen on integral types.
19 // * All of the constants in a switch statement are of the correct type
20 // * The code is in valid SSA form
21 // * It should be illegal to put a label into any other type (like a structure)
22 // or to return one. [except constant arrays!]
23 // * Only phi nodes can be self referential: 'add i32 %0, %0 ; <int>:0' is bad
24 // * PHI nodes must have an entry for each predecessor, with no extras.
25 // * PHI nodes must be the first thing in a basic block, all grouped together
26 // * PHI nodes must have at least one entry
27 // * All basic blocks should only end with terminator insts, not contain them
28 // * The entry node to a function must not have predecessors
29 // * All Instructions must be embedded into a basic block
30 // * Functions cannot take a void-typed parameter
31 // * Verify that a function's argument list agrees with its declared type.
32 // * It is illegal to specify a name for a void value.
33 // * It is illegal to have an internal global value with no initializer
34 // * It is illegal to have a ret instruction that returns a value that does not
35 // agree with the function return value type.
36 // * Function call argument types match the function prototype
37 // * A landing pad is defined by a landingpad instruction, and can be jumped to
38 // only by the unwind edge of an invoke instruction.
39 // * A landingpad instruction must be the first non-PHI instruction in the
40 // block.
41 // * Landingpad instructions must be in a function with a personality function.
42 // * All other things that are tested by asserts spread about the code...
44 //===----------------------------------------------------------------------===//
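//
// The usual entry points are the free functions declared in llvm/IR/Verifier.h
// rather than the Verifier class defined below (which lives in an anonymous
// namespace). A minimal usage sketch, assuming you already hold a Module M;
// the helper name checkModule is illustrative, not part of LLVM:
//
//   #include "llvm/IR/Verifier.h"
//   #include "llvm/Support/raw_ostream.h"
//
//   bool checkModule(llvm::Module &M) {
//     // verifyModule() returns true when the module is broken and, when given
//     // a stream, prints the offending constructs to it.
//     return !llvm::verifyModule(M, &llvm::errs());
//   }
//
//   // The per-function variant behaves the same way:
//   //   if (llvm::verifyFunction(F, &llvm::errs())) { /* F is broken */ }
//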
46 #include "llvm/IR/Verifier.h"
47 #include "llvm/ADT/APFloat.h"
48 #include "llvm/ADT/APInt.h"
49 #include "llvm/ADT/ArrayRef.h"
50 #include "llvm/ADT/DenseMap.h"
51 #include "llvm/ADT/MapVector.h"
52 #include "llvm/ADT/Optional.h"
53 #include "llvm/ADT/STLExtras.h"
54 #include "llvm/ADT/SmallPtrSet.h"
55 #include "llvm/ADT/SmallSet.h"
56 #include "llvm/ADT/SmallVector.h"
57 #include "llvm/ADT/StringExtras.h"
58 #include "llvm/ADT/StringMap.h"
59 #include "llvm/ADT/StringRef.h"
60 #include "llvm/ADT/Twine.h"
61 #include "llvm/ADT/ilist.h"
62 #include "llvm/BinaryFormat/Dwarf.h"
63 #include "llvm/IR/Argument.h"
64 #include "llvm/IR/Attributes.h"
65 #include "llvm/IR/BasicBlock.h"
66 #include "llvm/IR/CFG.h"
67 #include "llvm/IR/CallingConv.h"
68 #include "llvm/IR/Comdat.h"
69 #include "llvm/IR/Constant.h"
70 #include "llvm/IR/ConstantRange.h"
71 #include "llvm/IR/Constants.h"
72 #include "llvm/IR/DataLayout.h"
73 #include "llvm/IR/DebugInfo.h"
74 #include "llvm/IR/DebugInfoMetadata.h"
75 #include "llvm/IR/DebugLoc.h"
76 #include "llvm/IR/DerivedTypes.h"
77 #include "llvm/IR/Dominators.h"
78 #include "llvm/IR/Function.h"
79 #include "llvm/IR/GlobalAlias.h"
80 #include "llvm/IR/GlobalValue.h"
81 #include "llvm/IR/GlobalVariable.h"
82 #include "llvm/IR/InlineAsm.h"
83 #include "llvm/IR/InstVisitor.h"
84 #include "llvm/IR/InstrTypes.h"
85 #include "llvm/IR/Instruction.h"
86 #include "llvm/IR/Instructions.h"
87 #include "llvm/IR/IntrinsicInst.h"
88 #include "llvm/IR/Intrinsics.h"
89 #include "llvm/IR/LLVMContext.h"
90 #include "llvm/IR/Metadata.h"
91 #include "llvm/IR/Module.h"
92 #include "llvm/IR/ModuleSlotTracker.h"
93 #include "llvm/IR/PassManager.h"
94 #include "llvm/IR/Statepoint.h"
95 #include "llvm/IR/Type.h"
96 #include "llvm/IR/Use.h"
97 #include "llvm/IR/User.h"
98 #include "llvm/IR/Value.h"
99 #include "llvm/Pass.h"
100 #include "llvm/Support/AtomicOrdering.h"
101 #include "llvm/Support/Casting.h"
102 #include "llvm/Support/CommandLine.h"
103 #include "llvm/Support/Debug.h"
104 #include "llvm/Support/ErrorHandling.h"
105 #include "llvm/Support/MathExtras.h"
106 #include "llvm/Support/raw_ostream.h"
107 #include <algorithm>
108 #include <cassert>
109 #include <cstdint>
110 #include <memory>
111 #include <string>
112 #include <utility>
114 using namespace llvm;
116 namespace llvm {
118 struct VerifierSupport {
119 raw_ostream *OS;
120 const Module &M;
121 ModuleSlotTracker MST;
122 Triple TT;
123 const DataLayout &DL;
124 LLVMContext &Context;
126 /// Track the brokenness of the module while recursively visiting.
127 bool Broken = false;
128 /// Broken debug info can be "recovered" from by stripping the debug info.
129 bool BrokenDebugInfo = false;
130 /// Whether to treat broken debug info as an error.
131 bool TreatBrokenDebugInfoAsError = true;
133 explicit VerifierSupport(raw_ostream *OS, const Module &M)
134 : OS(OS), M(M), MST(&M), TT(M.getTargetTriple()), DL(M.getDataLayout()),
135 Context(M.getContext()) {}
137 private:
138 void Write(const Module *M) {
139 *OS << "; ModuleID = '" << M->getModuleIdentifier() << "'\n";
142 void Write(const Value *V) {
143 if (V)
144 Write(*V);
147 void Write(const Value &V) {
148 if (isa<Instruction>(V)) {
149 V.print(*OS, MST);
150 *OS << '\n';
151 } else {
152 V.printAsOperand(*OS, true, MST);
153 *OS << '\n';
157 void Write(const Metadata *MD) {
158 if (!MD)
159 return;
160 MD->print(*OS, MST, &M);
161 *OS << '\n';
164 template <class T> void Write(const MDTupleTypedArrayWrapper<T> &MD) {
165 Write(MD.get());
168 void Write(const NamedMDNode *NMD) {
169 if (!NMD)
170 return;
171 NMD->print(*OS, MST);
172 *OS << '\n';
175 void Write(Type *T) {
176 if (!T)
177 return;
178 *OS << ' ' << *T;
181 void Write(const Comdat *C) {
182 if (!C)
183 return;
184 *OS << *C;
187 void Write(const APInt *AI) {
188 if (!AI)
189 return;
190 *OS << *AI << '\n';
193 void Write(const unsigned i) { *OS << i << '\n'; }
195 template <typename T> void Write(ArrayRef<T> Vs) {
196 for (const T &V : Vs)
197 Write(V);
200 template <typename T1, typename... Ts>
201 void WriteTs(const T1 &V1, const Ts &... Vs) {
202 Write(V1);
203 WriteTs(Vs...);
206 template <typename... Ts> void WriteTs() {}
208 public:
209 /// A check failed, so print out the condition and the message.
211 /// This provides a nice place to put a breakpoint if you want to see why
212 /// something is not correct.
213 void CheckFailed(const Twine &Message) {
214 if (OS)
215 *OS << Message << '\n';
216 Broken = true;
219 /// A check failed (with values to print).
221 /// This calls the Message-only version so that the above is easier to set a
222 /// breakpoint on.
223 template <typename T1, typename... Ts>
224 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
225 CheckFailed(Message);
226 if (OS)
227 WriteTs(V1, Vs...);
230 /// A debug info check failed.
231 void DebugInfoCheckFailed(const Twine &Message) {
232 if (OS)
233 *OS << Message << '\n';
234 Broken |= TreatBrokenDebugInfoAsError;
235 BrokenDebugInfo = true;
238 /// A debug info check failed (with values to print).
239 template <typename T1, typename... Ts>
240 void DebugInfoCheckFailed(const Twine &Message, const T1 &V1,
241 const Ts &... Vs) {
242 DebugInfoCheckFailed(Message);
243 if (OS)
244 WriteTs(V1, Vs...);
248 } // namespace llvm
250 namespace {
252 class Verifier : public InstVisitor<Verifier>, VerifierSupport {
253 friend class InstVisitor<Verifier>;
255 DominatorTree DT;
257 /// When verifying a basic block, keep track of all of the
258 /// instructions we have seen so far.
260 /// This allows us to do efficient dominance checks for the case when an
261 /// instruction has an operand that is an instruction in the same block.
262 SmallPtrSet<Instruction *, 16> InstsInThisBlock;
264 /// Keep track of the metadata nodes that have been checked already.
265 SmallPtrSet<const Metadata *, 32> MDNodes;
267 /// Keep track of which DISubprogram is attached to which function.
268 DenseMap<const DISubprogram *, const Function *> DISubprogramAttachments;
270 /// Track all DICompileUnits visited.
271 SmallPtrSet<const Metadata *, 2> CUVisited;
273 /// The result type for a landingpad.
274 Type *LandingPadResultTy;
276 /// Whether we've seen a call to @llvm.localescape in this function
277 /// already.
278 bool SawFrameEscape;
280 /// Whether the current function has a DISubprogram attached to it.
281 bool HasDebugInfo = false;
283 /// Whether source was present on the first DIFile encountered in each CU.
284 DenseMap<const DICompileUnit *, bool> HasSourceDebugInfo;
286 /// Stores the count of how many objects were passed to llvm.localescape for a
287 /// given function and the largest index passed to llvm.localrecover.
288 DenseMap<Function *, std::pair<unsigned, unsigned>> FrameEscapeInfo;
290 // Maps catchswitches and cleanuppads that unwind to siblings to the
291 // terminators that indicate the unwind, used to detect cycles therein.
292 MapVector<Instruction *, Instruction *> SiblingFuncletInfo;
294 /// Cache of constants visited in search of ConstantExprs.
295 SmallPtrSet<const Constant *, 32> ConstantExprVisited;
297 /// Cache of declarations of the llvm.experimental.deoptimize.<ty> intrinsic.
298 SmallVector<const Function *, 4> DeoptimizeDeclarations;
300 // Verify that this GlobalValue is only used in this module.
301 // This map is used to avoid visiting uses twice. We can arrive at a user
302 // twice if it has multiple operands. In particular, for very large
303 // constant expressions, we can arrive at a particular user many times.
304 SmallPtrSet<const Value *, 32> GlobalValueVisited;
306 // Keeps track of duplicate function argument debug info.
307 SmallVector<const DILocalVariable *, 16> DebugFnArgs;
309 TBAAVerifier TBAAVerifyHelper;
311 void checkAtomicMemAccessSize(Type *Ty, const Instruction *I);
313 public:
314 explicit Verifier(raw_ostream *OS, bool ShouldTreatBrokenDebugInfoAsError,
315 const Module &M)
316 : VerifierSupport(OS, M), LandingPadResultTy(nullptr),
317 SawFrameEscape(false), TBAAVerifyHelper(this) {
318 TreatBrokenDebugInfoAsError = ShouldTreatBrokenDebugInfoAsError;
321 bool hasBrokenDebugInfo() const { return BrokenDebugInfo; }
323 bool verify(const Function &F) {
324 assert(F.getParent() == &M &&
325 "An instance of this class only works with a specific module!");
327 // First ensure the function is well-enough formed to compute dominance
328 // information, and directly compute a dominance tree. We don't rely on the
329 // pass manager to provide this as it isolates us from a potentially
330 // out-of-date dominator tree and makes it significantly more complex to run
331 // this code outside of a pass manager.
332 // FIXME: It's really gross that we have to cast away constness here.
333 if (!F.empty())
334 DT.recalculate(const_cast<Function &>(F));
336 for (const BasicBlock &BB : F) {
337 if (!BB.empty() && BB.back().isTerminator())
338 continue;
340 if (OS) {
341 *OS << "Basic Block in function '" << F.getName()
342 << "' does not have terminator!\n";
343 BB.printAsOperand(*OS, true, MST);
344 *OS << "\n";
346 return false;
349 Broken = false;
350 // FIXME: We strip const here because the inst visitor strips const.
351 visit(const_cast<Function &>(F));
352 verifySiblingFuncletUnwinds();
353 InstsInThisBlock.clear();
354 DebugFnArgs.clear();
355 LandingPadResultTy = nullptr;
356 SawFrameEscape = false;
357 SiblingFuncletInfo.clear();
359 return !Broken;
362 /// Verify the module that this instance of \c Verifier was initialized with.
363 bool verify() {
364 Broken = false;
366 // Collect all declarations of the llvm.experimental.deoptimize intrinsic.
367 for (const Function &F : M)
368 if (F.getIntrinsicID() == Intrinsic::experimental_deoptimize)
369 DeoptimizeDeclarations.push_back(&F);
371 // Now that we've visited every function, verify that we never asked to
372 // recover a frame index that wasn't escaped.
373 verifyFrameRecoverIndices();
374 for (const GlobalVariable &GV : M.globals())
375 visitGlobalVariable(GV);
377 for (const GlobalAlias &GA : M.aliases())
378 visitGlobalAlias(GA);
380 for (const NamedMDNode &NMD : M.named_metadata())
381 visitNamedMDNode(NMD);
383 for (const StringMapEntry<Comdat> &SMEC : M.getComdatSymbolTable())
384 visitComdat(SMEC.getValue());
386 visitModuleFlags(M);
387 visitModuleIdents(M);
388 visitModuleCommandLines(M);
390 verifyCompileUnits();
392 verifyDeoptimizeCallingConvs();
393 DISubprogramAttachments.clear();
394 return !Broken;
397 private:
398 // Verification methods...
399 void visitGlobalValue(const GlobalValue &GV);
400 void visitGlobalVariable(const GlobalVariable &GV);
401 void visitGlobalAlias(const GlobalAlias &GA);
402 void visitAliaseeSubExpr(const GlobalAlias &A, const Constant &C);
403 void visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias *> &Visited,
404 const GlobalAlias &A, const Constant &C);
405 void visitNamedMDNode(const NamedMDNode &NMD);
406 void visitMDNode(const MDNode &MD);
407 void visitMetadataAsValue(const MetadataAsValue &MD, Function *F);
408 void visitValueAsMetadata(const ValueAsMetadata &MD, Function *F);
409 void visitComdat(const Comdat &C);
410 void visitModuleIdents(const Module &M);
411 void visitModuleCommandLines(const Module &M);
412 void visitModuleFlags(const Module &M);
413 void visitModuleFlag(const MDNode *Op,
414 DenseMap<const MDString *, const MDNode *> &SeenIDs,
415 SmallVectorImpl<const MDNode *> &Requirements);
416 void visitModuleFlagCGProfileEntry(const MDOperand &MDO);
417 void visitFunction(const Function &F);
418 void visitBasicBlock(BasicBlock &BB);
419 void visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty);
420 void visitDereferenceableMetadata(Instruction &I, MDNode *MD);
421 void visitProfMetadata(Instruction &I, MDNode *MD);
423 template <class Ty> bool isValidMetadataArray(const MDTuple &N);
424 #define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) void visit##CLASS(const CLASS &N);
425 #include "llvm/IR/Metadata.def"
426 void visitDIScope(const DIScope &N);
427 void visitDIVariable(const DIVariable &N);
428 void visitDILexicalBlockBase(const DILexicalBlockBase &N);
429 void visitDITemplateParameter(const DITemplateParameter &N);
431 void visitTemplateParams(const MDNode &N, const Metadata &RawParams);
433 // InstVisitor overrides...
434 using InstVisitor<Verifier>::visit;
435 void visit(Instruction &I);
437 void visitTruncInst(TruncInst &I);
438 void visitZExtInst(ZExtInst &I);
439 void visitSExtInst(SExtInst &I);
440 void visitFPTruncInst(FPTruncInst &I);
441 void visitFPExtInst(FPExtInst &I);
442 void visitFPToUIInst(FPToUIInst &I);
443 void visitFPToSIInst(FPToSIInst &I);
444 void visitUIToFPInst(UIToFPInst &I);
445 void visitSIToFPInst(SIToFPInst &I);
446 void visitIntToPtrInst(IntToPtrInst &I);
447 void visitPtrToIntInst(PtrToIntInst &I);
448 void visitBitCastInst(BitCastInst &I);
449 void visitAddrSpaceCastInst(AddrSpaceCastInst &I);
450 void visitPHINode(PHINode &PN);
451 void visitCallBase(CallBase &Call);
452 void visitUnaryOperator(UnaryOperator &U);
453 void visitBinaryOperator(BinaryOperator &B);
454 void visitICmpInst(ICmpInst &IC);
455 void visitFCmpInst(FCmpInst &FC);
456 void visitExtractElementInst(ExtractElementInst &EI);
457 void visitInsertElementInst(InsertElementInst &EI);
458 void visitShuffleVectorInst(ShuffleVectorInst &EI);
459 void visitVAArgInst(VAArgInst &VAA) { visitInstruction(VAA); }
460 void visitCallInst(CallInst &CI);
461 void visitInvokeInst(InvokeInst &II);
462 void visitGetElementPtrInst(GetElementPtrInst &GEP);
463 void visitLoadInst(LoadInst &LI);
464 void visitStoreInst(StoreInst &SI);
465 void verifyDominatesUse(Instruction &I, unsigned i);
466 void visitInstruction(Instruction &I);
467 void visitTerminator(Instruction &I);
468 void visitBranchInst(BranchInst &BI);
469 void visitReturnInst(ReturnInst &RI);
470 void visitSwitchInst(SwitchInst &SI);
471 void visitIndirectBrInst(IndirectBrInst &BI);
472 void visitCallBrInst(CallBrInst &CBI);
473 void visitSelectInst(SelectInst &SI);
474 void visitUserOp1(Instruction &I);
475 void visitUserOp2(Instruction &I) { visitUserOp1(I); }
476 void visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call);
477 void visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI);
478 void visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII);
479 void visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI);
480 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI);
481 void visitAtomicRMWInst(AtomicRMWInst &RMWI);
482 void visitFenceInst(FenceInst &FI);
483 void visitAllocaInst(AllocaInst &AI);
484 void visitExtractValueInst(ExtractValueInst &EVI);
485 void visitInsertValueInst(InsertValueInst &IVI);
486 void visitEHPadPredecessors(Instruction &I);
487 void visitLandingPadInst(LandingPadInst &LPI);
488 void visitResumeInst(ResumeInst &RI);
489 void visitCatchPadInst(CatchPadInst &CPI);
490 void visitCatchReturnInst(CatchReturnInst &CatchReturn);
491 void visitCleanupPadInst(CleanupPadInst &CPI);
492 void visitFuncletPadInst(FuncletPadInst &FPI);
493 void visitCatchSwitchInst(CatchSwitchInst &CatchSwitch);
494 void visitCleanupReturnInst(CleanupReturnInst &CRI);
496 void verifySwiftErrorCall(CallBase &Call, const Value *SwiftErrorVal);
497 void verifySwiftErrorValue(const Value *SwiftErrorVal);
498 void verifyMustTailCall(CallInst &CI);
499 bool performTypeCheck(Intrinsic::ID ID, Function *F, Type *Ty, int VT,
500 unsigned ArgNo, std::string &Suffix);
501 bool verifyAttributeCount(AttributeList Attrs, unsigned Params);
502 void verifyAttributeTypes(AttributeSet Attrs, bool IsFunction,
503 const Value *V);
504 void verifyParameterAttrs(AttributeSet Attrs, Type *Ty, const Value *V);
505 void verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
506 const Value *V, bool IsIntrinsic);
507 void verifyFunctionMetadata(ArrayRef<std::pair<unsigned, MDNode *>> MDs);
509 void visitConstantExprsRecursively(const Constant *EntryC);
510 void visitConstantExpr(const ConstantExpr *CE);
511 void verifyStatepoint(const CallBase &Call);
512 void verifyFrameRecoverIndices();
513 void verifySiblingFuncletUnwinds();
515 void verifyFragmentExpression(const DbgVariableIntrinsic &I);
516 template <typename ValueOrMetadata>
517 void verifyFragmentExpression(const DIVariable &V,
518 DIExpression::FragmentInfo Fragment,
519 ValueOrMetadata *Desc);
520 void verifyFnArgs(const DbgVariableIntrinsic &I);
522 /// Module-level debug info verification...
523 void verifyCompileUnits();
525 /// Module-level verification that all @llvm.experimental.deoptimize
526 /// declarations share the same calling convention.
527 void verifyDeoptimizeCallingConvs();
529 /// Verify all-or-nothing property of DIFile source attribute within a CU.
530 void verifySourceDebugInfo(const DICompileUnit &U, const DIFile &F);
533 } // end anonymous namespace
535 /// We know that cond should be true; if not, print an error message.
536 #define Assert(C, ...) \
537 do { if (!(C)) { CheckFailed(__VA_ARGS__); return; } } while (false)
539 /// We know that a debug info condition should be true; if not, print
540 /// an error message.
541 #define AssertDI(C, ...) \
542 do { if (!(C)) { DebugInfoCheckFailed(__VA_ARGS__); return; } } while (false)
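// Note the early 'return' in both macros: a failed check abandons the rest of
// the current visit method, so later checks in that method are skipped. A
// typical use inside a visitor, for illustration only (visitFoo and FooInst
// are hypothetical names):
//   void Verifier::visitFoo(FooInst &I) {
//     Assert(I.getOperand(0), "operand required", &I); // returns on failure
//     // ... further checks run only if the assertion above held ...
//   }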
544 void Verifier::visit(Instruction &I) {
545 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i)
546 Assert(I.getOperand(i) != nullptr, "Operand is null", &I);
547 InstVisitor<Verifier>::visit(I);
550 // Helper to recursively iterate over indirect users. By
551 // returning false, the callback can ask to stop recursing
552 // further.
553 static void forEachUser(const Value *User,
554 SmallPtrSet<const Value *, 32> &Visited,
555 llvm::function_ref<bool(const Value *)> Callback) {
556 if (!Visited.insert(User).second)
557 return;
558 for (const Value *TheNextUser : User->materialized_users())
559 if (Callback(TheNextUser))
560 forEachUser(TheNextUser, Visited, Callback);
563 void Verifier::visitGlobalValue(const GlobalValue &GV) {
564 Assert(!GV.isDeclaration() || GV.hasValidDeclarationLinkage(),
565 "Global is external, but doesn't have external or weak linkage!", &GV);
567 Assert(GV.getAlignment() <= Value::MaximumAlignment,
568 "huge alignment values are unsupported", &GV);
569 Assert(!GV.hasAppendingLinkage() || isa<GlobalVariable>(GV),
570 "Only global variables can have appending linkage!", &GV);
572 if (GV.hasAppendingLinkage()) {
573 const GlobalVariable *GVar = dyn_cast<GlobalVariable>(&GV);
574 Assert(GVar && GVar->getValueType()->isArrayTy(),
575 "Only global arrays can have appending linkage!", GVar);
578 if (GV.isDeclarationForLinker())
579 Assert(!GV.hasComdat(), "Declaration may not be in a Comdat!", &GV);
581 if (GV.hasDLLImportStorageClass()) {
582 Assert(!GV.isDSOLocal(),
583 "GlobalValue with DLLImport Storage is dso_local!", &GV);
585 Assert((GV.isDeclaration() && GV.hasExternalLinkage()) ||
586 GV.hasAvailableExternallyLinkage(),
587 "Global is marked as dllimport, but not external", &GV);
590 if (GV.hasLocalLinkage())
591 Assert(GV.isDSOLocal(),
592 "GlobalValue with private or internal linkage must be dso_local!",
593 &GV);
595 if (!GV.hasDefaultVisibility() && !GV.hasExternalWeakLinkage())
596 Assert(GV.isDSOLocal(),
597 "GlobalValue with non default visibility must be dso_local!", &GV);
599 forEachUser(&GV, GlobalValueVisited, [&](const Value *V) -> bool {
600 if (const Instruction *I = dyn_cast<Instruction>(V)) {
601 if (!I->getParent() || !I->getParent()->getParent())
602 CheckFailed("Global is referenced by parentless instruction!", &GV, &M,
604 else if (I->getParent()->getParent()->getParent() != &M)
605 CheckFailed("Global is referenced in a different module!", &GV, &M, I,
606 I->getParent()->getParent(),
607 I->getParent()->getParent()->getParent());
608 return false;
609 } else if (const Function *F = dyn_cast<Function>(V)) {
610 if (F->getParent() != &M)
611 CheckFailed("Global is used by function in a different module", &GV, &M,
612 F, F->getParent());
613 return false;
615 return true;
619 void Verifier::visitGlobalVariable(const GlobalVariable &GV) {
620 if (GV.hasInitializer()) {
621 Assert(GV.getInitializer()->getType() == GV.getValueType(),
622 "Global variable initializer type does not match global "
623 "variable type!",
624 &GV);
625 // If the global has common linkage, it must have a zero initializer and
626 // cannot be constant.
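// For reference, a conforming 'common' global looks like this in textual IR
// (illustrative; @g is an arbitrary name):
//   @g = common global i32 0, align 4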
627 if (GV.hasCommonLinkage()) {
628 Assert(GV.getInitializer()->isNullValue(),
629 "'common' global must have a zero initializer!", &GV);
630 Assert(!GV.isConstant(), "'common' global may not be marked constant!",
631 &GV);
632 Assert(!GV.hasComdat(), "'common' global may not be in a Comdat!", &GV);
636 if (GV.hasName() && (GV.getName() == "llvm.global_ctors" ||
637 GV.getName() == "llvm.global_dtors")) {
638 Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(),
639 "invalid linkage for intrinsic global variable", &GV);
640 // Don't worry about emitting an error for it not being an array,
641 // visitGlobalValue will complain on appending non-array.
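// For reference, a conforming three-field entry looks roughly like this in
// textual IR (illustrative; @ctor is an arbitrary function):
//   @llvm.global_ctors = appending global [1 x { i32, void ()*, i8* }]
//     [{ i32, void ()*, i8* } { i32 65535, void ()* @ctor, i8* null }]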
642 if (ArrayType *ATy = dyn_cast<ArrayType>(GV.getValueType())) {
643 StructType *STy = dyn_cast<StructType>(ATy->getElementType());
644 PointerType *FuncPtrTy =
645 FunctionType::get(Type::getVoidTy(Context), false)->
646 getPointerTo(DL.getProgramAddressSpace());
647 Assert(STy &&
648 (STy->getNumElements() == 2 || STy->getNumElements() == 3) &&
649 STy->getTypeAtIndex(0u)->isIntegerTy(32) &&
650 STy->getTypeAtIndex(1) == FuncPtrTy,
651 "wrong type for intrinsic global variable", &GV);
652 Assert(STy->getNumElements() == 3,
653 "the third field of the element type is mandatory, "
654 "specify i8* null to migrate from the obsoleted 2-field form");
655 Type *ETy = STy->getTypeAtIndex(2);
656 Assert(ETy->isPointerTy() &&
657 cast<PointerType>(ETy)->getElementType()->isIntegerTy(8),
658 "wrong type for intrinsic global variable", &GV);
662 if (GV.hasName() && (GV.getName() == "llvm.used" ||
663 GV.getName() == "llvm.compiler.used")) {
664 Assert(!GV.hasInitializer() || GV.hasAppendingLinkage(),
665 "invalid linkage for intrinsic global variable", &GV);
666 Type *GVType = GV.getValueType();
667 if (ArrayType *ATy = dyn_cast<ArrayType>(GVType)) {
668 PointerType *PTy = dyn_cast<PointerType>(ATy->getElementType());
669 Assert(PTy, "wrong type for intrinsic global variable", &GV);
670 if (GV.hasInitializer()) {
671 const Constant *Init = GV.getInitializer();
672 const ConstantArray *InitArray = dyn_cast<ConstantArray>(Init);
673 Assert(InitArray, "wrong initializer for intrinsic global variable",
674 Init);
675 for (Value *Op : InitArray->operands()) {
676 Value *V = Op->stripPointerCasts();
677 Assert(isa<GlobalVariable>(V) || isa<Function>(V) ||
678 isa<GlobalAlias>(V),
679 "invalid llvm.used member", V);
680 Assert(V->hasName(), "members of llvm.used must be named", V);
686 // Visit any debug info attachments.
687 SmallVector<MDNode *, 1> MDs;
688 GV.getMetadata(LLVMContext::MD_dbg, MDs);
689 for (auto *MD : MDs) {
690 if (auto *GVE = dyn_cast<DIGlobalVariableExpression>(MD))
691 visitDIGlobalVariableExpression(*GVE);
692 else
693 AssertDI(false, "!dbg attachment of global variable must be a "
694 "DIGlobalVariableExpression");
697 // Scalable vectors cannot be global variables, since we don't know
698 // the runtime size. If the global is a struct or an array containing
699 // scalable vectors, that will be caught by the isValidElementType methods
700 // in StructType or ArrayType instead.
701 if (auto *VTy = dyn_cast<VectorType>(GV.getValueType()))
702 Assert(!VTy->isScalable(), "Globals cannot contain scalable vectors", &GV);
704 if (!GV.hasInitializer()) {
705 visitGlobalValue(GV);
706 return;
709 // Walk any aggregate initializers looking for bitcasts between address spaces
710 visitConstantExprsRecursively(GV.getInitializer());
712 visitGlobalValue(GV);
715 void Verifier::visitAliaseeSubExpr(const GlobalAlias &GA, const Constant &C) {
716 SmallPtrSet<const GlobalAlias*, 4> Visited;
717 Visited.insert(&GA);
718 visitAliaseeSubExpr(Visited, GA, C);
721 void Verifier::visitAliaseeSubExpr(SmallPtrSetImpl<const GlobalAlias*> &Visited,
722 const GlobalAlias &GA, const Constant &C) {
723 if (const auto *GV = dyn_cast<GlobalValue>(&C)) {
724 Assert(!GV->isDeclarationForLinker(), "Alias must point to a definition",
725 &GA);
727 if (const auto *GA2 = dyn_cast<GlobalAlias>(GV)) {
728 Assert(Visited.insert(GA2).second, "Aliases cannot form a cycle", &GA);
730 Assert(!GA2->isInterposable(), "Alias cannot point to an interposable alias",
731 &GA);
732 } else {
733 // Only continue verifying subexpressions of GlobalAliases.
734 // Do not recurse into global initializers.
735 return;
739 if (const auto *CE = dyn_cast<ConstantExpr>(&C))
740 visitConstantExprsRecursively(CE);
742 for (const Use &U : C.operands()) {
743 Value *V = &*U;
744 if (const auto *GA2 = dyn_cast<GlobalAlias>(V))
745 visitAliaseeSubExpr(Visited, GA, *GA2->getAliasee());
746 else if (const auto *C2 = dyn_cast<Constant>(V))
747 visitAliaseeSubExpr(Visited, GA, *C2);
751 void Verifier::visitGlobalAlias(const GlobalAlias &GA) {
752 Assert(GlobalAlias::isValidLinkage(GA.getLinkage()),
753 "Alias should have private, internal, linkonce, weak, linkonce_odr, "
754 "weak_odr, or external linkage!",
755 &GA);
756 const Constant *Aliasee = GA.getAliasee();
757 Assert(Aliasee, "Aliasee cannot be NULL!", &GA);
758 Assert(GA.getType() == Aliasee->getType(),
759 "Alias and aliasee types should match!", &GA);
761 Assert(isa<GlobalValue>(Aliasee) || isa<ConstantExpr>(Aliasee),
762 "Aliasee should be either GlobalValue or ConstantExpr", &GA);
764 visitAliaseeSubExpr(GA, *Aliasee);
766 visitGlobalValue(GA);
769 void Verifier::visitNamedMDNode(const NamedMDNode &NMD) {
770 // There used to be various other llvm.dbg.* nodes, but we don't support
771 // upgrading them and we want to reserve the namespace for future uses.
772 if (NMD.getName().startswith("llvm.dbg."))
773 AssertDI(NMD.getName() == "llvm.dbg.cu",
774 "unrecognized named metadata node in the llvm.dbg namespace",
775 &NMD);
776 for (const MDNode *MD : NMD.operands()) {
777 if (NMD.getName() == "llvm.dbg.cu")
778 AssertDI(MD && isa<DICompileUnit>(MD), "invalid compile unit", &NMD, MD);
780 if (!MD)
781 continue;
783 visitMDNode(*MD);
787 void Verifier::visitMDNode(const MDNode &MD) {
788 // Only visit each node once. Metadata can be mutually recursive, so this
789 // avoids infinite recursion here, as well as being an optimization.
790 if (!MDNodes.insert(&MD).second)
791 return;
793 switch (MD.getMetadataID()) {
794 default:
795 llvm_unreachable("Invalid MDNode subclass");
796 case Metadata::MDTupleKind:
797 break;
798 #define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) \
799 case Metadata::CLASS##Kind: \
800 visit##CLASS(cast<CLASS>(MD)); \
801 break;
802 #include "llvm/IR/Metadata.def"
805 for (const Metadata *Op : MD.operands()) {
806 if (!Op)
807 continue;
808 Assert(!isa<LocalAsMetadata>(Op), "Invalid operand for global metadata!",
809 &MD, Op);
810 if (auto *N = dyn_cast<MDNode>(Op)) {
811 visitMDNode(*N);
812 continue;
814 if (auto *V = dyn_cast<ValueAsMetadata>(Op)) {
815 visitValueAsMetadata(*V, nullptr);
816 continue;
820 // Check these last, so we diagnose problems in operands first.
821 Assert(!MD.isTemporary(), "Expected no forward declarations!", &MD);
822 Assert(MD.isResolved(), "All nodes should be resolved!", &MD);
825 void Verifier::visitValueAsMetadata(const ValueAsMetadata &MD, Function *F) {
826 Assert(MD.getValue(), "Expected valid value", &MD);
827 Assert(!MD.getValue()->getType()->isMetadataTy(),
828 "Unexpected metadata round-trip through values", &MD, MD.getValue());
830 auto *L = dyn_cast<LocalAsMetadata>(&MD);
831 if (!L)
832 return;
834 Assert(F, "function-local metadata used outside a function", L);
836 // If this was an instruction, bb, or argument, verify that it is in the
837 // function that we expect.
838 Function *ActualF = nullptr;
839 if (Instruction *I = dyn_cast<Instruction>(L->getValue())) {
840 Assert(I->getParent(), "function-local metadata not in basic block", L, I);
841 ActualF = I->getParent()->getParent();
842 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(L->getValue()))
843 ActualF = BB->getParent();
844 else if (Argument *A = dyn_cast<Argument>(L->getValue()))
845 ActualF = A->getParent();
846 assert(ActualF && "Unimplemented function local metadata case!");
848 Assert(ActualF == F, "function-local metadata used in wrong function", L);
851 void Verifier::visitMetadataAsValue(const MetadataAsValue &MDV, Function *F) {
852 Metadata *MD = MDV.getMetadata();
853 if (auto *N = dyn_cast<MDNode>(MD)) {
854 visitMDNode(*N);
855 return;
858 // Only visit each node once. Metadata can be mutually recursive, so this
859 // avoids infinite recursion here, as well as being an optimization.
860 if (!MDNodes.insert(MD).second)
861 return;
863 if (auto *V = dyn_cast<ValueAsMetadata>(MD))
864 visitValueAsMetadata(*V, F);
867 static bool isType(const Metadata *MD) { return !MD || isa<DIType>(MD); }
868 static bool isScope(const Metadata *MD) { return !MD || isa<DIScope>(MD); }
869 static bool isDINode(const Metadata *MD) { return !MD || isa<DINode>(MD); }
871 void Verifier::visitDILocation(const DILocation &N) {
872 AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
873 "location requires a valid scope", &N, N.getRawScope());
874 if (auto *IA = N.getRawInlinedAt())
875 AssertDI(isa<DILocation>(IA), "inlined-at should be a location", &N, IA);
876 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
877 AssertDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
880 void Verifier::visitGenericDINode(const GenericDINode &N) {
881 AssertDI(N.getTag(), "invalid tag", &N);
884 void Verifier::visitDIScope(const DIScope &N) {
885 if (auto *F = N.getRawFile())
886 AssertDI(isa<DIFile>(F), "invalid file", &N, F);
889 void Verifier::visitDISubrange(const DISubrange &N) {
890 AssertDI(N.getTag() == dwarf::DW_TAG_subrange_type, "invalid tag", &N);
891 auto Count = N.getCount();
892 AssertDI(Count, "Count must either be a signed constant or a DIVariable",
893 &N);
894 AssertDI(!Count.is<ConstantInt*>() ||
895 Count.get<ConstantInt*>()->getSExtValue() >= -1,
896 "invalid subrange count", &N);
899 void Verifier::visitDIEnumerator(const DIEnumerator &N) {
900 AssertDI(N.getTag() == dwarf::DW_TAG_enumerator, "invalid tag", &N);
903 void Verifier::visitDIBasicType(const DIBasicType &N) {
904 AssertDI(N.getTag() == dwarf::DW_TAG_base_type ||
905 N.getTag() == dwarf::DW_TAG_unspecified_type,
906 "invalid tag", &N);
907 AssertDI(!(N.isBigEndian() && N.isLittleEndian()),
908 "has conflicting flags", &N);
911 void Verifier::visitDIDerivedType(const DIDerivedType &N) {
912 // Common scope checks.
913 visitDIScope(N);
915 AssertDI(N.getTag() == dwarf::DW_TAG_typedef ||
916 N.getTag() == dwarf::DW_TAG_pointer_type ||
917 N.getTag() == dwarf::DW_TAG_ptr_to_member_type ||
918 N.getTag() == dwarf::DW_TAG_reference_type ||
919 N.getTag() == dwarf::DW_TAG_rvalue_reference_type ||
920 N.getTag() == dwarf::DW_TAG_const_type ||
921 N.getTag() == dwarf::DW_TAG_volatile_type ||
922 N.getTag() == dwarf::DW_TAG_restrict_type ||
923 N.getTag() == dwarf::DW_TAG_atomic_type ||
924 N.getTag() == dwarf::DW_TAG_member ||
925 N.getTag() == dwarf::DW_TAG_inheritance ||
926 N.getTag() == dwarf::DW_TAG_friend,
927 "invalid tag", &N);
928 if (N.getTag() == dwarf::DW_TAG_ptr_to_member_type) {
929 AssertDI(isType(N.getRawExtraData()), "invalid pointer to member type", &N,
930 N.getRawExtraData());
933 AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
934 AssertDI(isType(N.getRawBaseType()), "invalid base type", &N,
935 N.getRawBaseType());
937 if (N.getDWARFAddressSpace()) {
938 AssertDI(N.getTag() == dwarf::DW_TAG_pointer_type ||
939 N.getTag() == dwarf::DW_TAG_reference_type ||
940 N.getTag() == dwarf::DW_TAG_rvalue_reference_type,
941 "DWARF address space only applies to pointer or reference types",
942 &N);
946 /// Detect mutually exclusive flags.
947 static bool hasConflictingReferenceFlags(unsigned Flags) {
948 return ((Flags & DINode::FlagLValueReference) &&
949 (Flags & DINode::FlagRValueReference)) ||
950 ((Flags & DINode::FlagTypePassByValue) &&
951 (Flags & DINode::FlagTypePassByReference));
954 void Verifier::visitTemplateParams(const MDNode &N, const Metadata &RawParams) {
955 auto *Params = dyn_cast<MDTuple>(&RawParams);
956 AssertDI(Params, "invalid template params", &N, &RawParams);
957 for (Metadata *Op : Params->operands()) {
958 AssertDI(Op && isa<DITemplateParameter>(Op), "invalid template parameter",
959 &N, Params, Op);
963 void Verifier::visitDICompositeType(const DICompositeType &N) {
964 // Common scope checks.
965 visitDIScope(N);
967 AssertDI(N.getTag() == dwarf::DW_TAG_array_type ||
968 N.getTag() == dwarf::DW_TAG_structure_type ||
969 N.getTag() == dwarf::DW_TAG_union_type ||
970 N.getTag() == dwarf::DW_TAG_enumeration_type ||
971 N.getTag() == dwarf::DW_TAG_class_type ||
972 N.getTag() == dwarf::DW_TAG_variant_part,
973 "invalid tag", &N);
975 AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
976 AssertDI(isType(N.getRawBaseType()), "invalid base type", &N,
977 N.getRawBaseType());
979 AssertDI(!N.getRawElements() || isa<MDTuple>(N.getRawElements()),
980 "invalid composite elements", &N, N.getRawElements());
981 AssertDI(isType(N.getRawVTableHolder()), "invalid vtable holder", &N,
982 N.getRawVTableHolder());
983 AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
984 "invalid reference flags", &N);
985 unsigned DIBlockByRefStruct = 1 << 4;
986 AssertDI((N.getFlags() & DIBlockByRefStruct) == 0,
987 "DIBlockByRefStruct on DICompositeType is no longer supported", &N);
989 if (N.isVector()) {
990 const DINodeArray Elements = N.getElements();
991 AssertDI(Elements.size() == 1 &&
992 Elements[0]->getTag() == dwarf::DW_TAG_subrange_type,
993 "invalid vector, expected one element of type subrange", &N);
996 if (auto *Params = N.getRawTemplateParams())
997 visitTemplateParams(N, *Params);
999 if (N.getTag() == dwarf::DW_TAG_class_type ||
1000 N.getTag() == dwarf::DW_TAG_union_type) {
1001 AssertDI(N.getFile() && !N.getFile()->getFilename().empty(),
1002 "class/union requires a filename", &N, N.getFile());
1005 if (auto *D = N.getRawDiscriminator()) {
1006 AssertDI(isa<DIDerivedType>(D) && N.getTag() == dwarf::DW_TAG_variant_part,
1007 "discriminator can only appear on variant part");
1011 void Verifier::visitDISubroutineType(const DISubroutineType &N) {
1012 AssertDI(N.getTag() == dwarf::DW_TAG_subroutine_type, "invalid tag", &N);
1013 if (auto *Types = N.getRawTypeArray()) {
1014 AssertDI(isa<MDTuple>(Types), "invalid composite elements", &N, Types);
1015 for (Metadata *Ty : N.getTypeArray()->operands()) {
1016 AssertDI(isType(Ty), "invalid subroutine type ref", &N, Types, Ty);
1019 AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
1020 "invalid reference flags", &N);
1023 void Verifier::visitDIFile(const DIFile &N) {
1024 AssertDI(N.getTag() == dwarf::DW_TAG_file_type, "invalid tag", &N);
1025 Optional<DIFile::ChecksumInfo<StringRef>> Checksum = N.getChecksum();
1026 if (Checksum) {
1027 AssertDI(Checksum->Kind <= DIFile::ChecksumKind::CSK_Last,
1028 "invalid checksum kind", &N);
1029 size_t Size;
1030 switch (Checksum->Kind) {
1031 case DIFile::CSK_MD5:
1032 Size = 32;
1033 break;
1034 case DIFile::CSK_SHA1:
1035 Size = 40;
1036 break;
1038 AssertDI(Checksum->Value.size() == Size, "invalid checksum length", &N);
1039 AssertDI(Checksum->Value.find_if_not(llvm::isHexDigit) == StringRef::npos,
1040 "invalid checksum", &N);
1044 void Verifier::visitDICompileUnit(const DICompileUnit &N) {
1045 AssertDI(N.isDistinct(), "compile units must be distinct", &N);
1046 AssertDI(N.getTag() == dwarf::DW_TAG_compile_unit, "invalid tag", &N);
1048 // Don't bother verifying the compilation directory or producer string
1049 // as those could be empty.
1050 AssertDI(N.getRawFile() && isa<DIFile>(N.getRawFile()), "invalid file", &N,
1051 N.getRawFile());
1052 AssertDI(!N.getFile()->getFilename().empty(), "invalid filename", &N,
1053 N.getFile());
1055 verifySourceDebugInfo(N, *N.getFile());
1057 AssertDI((N.getEmissionKind() <= DICompileUnit::LastEmissionKind),
1058 "invalid emission kind", &N);
1060 if (auto *Array = N.getRawEnumTypes()) {
1061 AssertDI(isa<MDTuple>(Array), "invalid enum list", &N, Array);
1062 for (Metadata *Op : N.getEnumTypes()->operands()) {
1063 auto *Enum = dyn_cast_or_null<DICompositeType>(Op);
1064 AssertDI(Enum && Enum->getTag() == dwarf::DW_TAG_enumeration_type,
1065 "invalid enum type", &N, N.getEnumTypes(), Op);
1068 if (auto *Array = N.getRawRetainedTypes()) {
1069 AssertDI(isa<MDTuple>(Array), "invalid retained type list", &N, Array);
1070 for (Metadata *Op : N.getRetainedTypes()->operands()) {
1071 AssertDI(Op && (isa<DIType>(Op) ||
1072 (isa<DISubprogram>(Op) &&
1073 !cast<DISubprogram>(Op)->isDefinition())),
1074 "invalid retained type", &N, Op);
1077 if (auto *Array = N.getRawGlobalVariables()) {
1078 AssertDI(isa<MDTuple>(Array), "invalid global variable list", &N, Array);
1079 for (Metadata *Op : N.getGlobalVariables()->operands()) {
1080 AssertDI(Op && (isa<DIGlobalVariableExpression>(Op)),
1081 "invalid global variable ref", &N, Op);
1084 if (auto *Array = N.getRawImportedEntities()) {
1085 AssertDI(isa<MDTuple>(Array), "invalid imported entity list", &N, Array);
1086 for (Metadata *Op : N.getImportedEntities()->operands()) {
1087 AssertDI(Op && isa<DIImportedEntity>(Op), "invalid imported entity ref",
1088 &N, Op);
1091 if (auto *Array = N.getRawMacros()) {
1092 AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1093 for (Metadata *Op : N.getMacros()->operands()) {
1094 AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1097 CUVisited.insert(&N);
1100 void Verifier::visitDISubprogram(const DISubprogram &N) {
1101 AssertDI(N.getTag() == dwarf::DW_TAG_subprogram, "invalid tag", &N);
1102 AssertDI(isScope(N.getRawScope()), "invalid scope", &N, N.getRawScope());
1103 if (auto *F = N.getRawFile())
1104 AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1105 else
1106 AssertDI(N.getLine() == 0, "line specified with no file", &N, N.getLine());
1107 if (auto *T = N.getRawType())
1108 AssertDI(isa<DISubroutineType>(T), "invalid subroutine type", &N, T);
1109 AssertDI(isType(N.getRawContainingType()), "invalid containing type", &N,
1110 N.getRawContainingType());
1111 if (auto *Params = N.getRawTemplateParams())
1112 visitTemplateParams(N, *Params);
1113 if (auto *S = N.getRawDeclaration())
1114 AssertDI(isa<DISubprogram>(S) && !cast<DISubprogram>(S)->isDefinition(),
1115 "invalid subprogram declaration", &N, S);
1116 if (auto *RawNode = N.getRawRetainedNodes()) {
1117 auto *Node = dyn_cast<MDTuple>(RawNode);
1118 AssertDI(Node, "invalid retained nodes list", &N, RawNode);
1119 for (Metadata *Op : Node->operands()) {
1120 AssertDI(Op && (isa<DILocalVariable>(Op) || isa<DILabel>(Op)),
1121 "invalid retained nodes, expected DILocalVariable or DILabel",
1122 &N, Node, Op);
1125 AssertDI(!hasConflictingReferenceFlags(N.getFlags()),
1126 "invalid reference flags", &N);
1128 auto *Unit = N.getRawUnit();
1129 if (N.isDefinition()) {
1130 // Subprogram definitions (not part of the type hierarchy).
1131 AssertDI(N.isDistinct(), "subprogram definitions must be distinct", &N);
1132 AssertDI(Unit, "subprogram definitions must have a compile unit", &N);
1133 AssertDI(isa<DICompileUnit>(Unit), "invalid unit type", &N, Unit);
1134 if (N.getFile())
1135 verifySourceDebugInfo(*N.getUnit(), *N.getFile());
1136 } else {
1137 // Subprogram declarations (part of the type hierarchy).
1138 AssertDI(!Unit, "subprogram declarations must not have a compile unit", &N);
1141 if (auto *RawThrownTypes = N.getRawThrownTypes()) {
1142 auto *ThrownTypes = dyn_cast<MDTuple>(RawThrownTypes);
1143 AssertDI(ThrownTypes, "invalid thrown types list", &N, RawThrownTypes);
1144 for (Metadata *Op : ThrownTypes->operands())
1145 AssertDI(Op && isa<DIType>(Op), "invalid thrown type", &N, ThrownTypes,
1146 Op);
1149 if (N.areAllCallsDescribed())
1150 AssertDI(N.isDefinition(),
1151 "DIFlagAllCallsDescribed must be attached to a definition");
1154 void Verifier::visitDILexicalBlockBase(const DILexicalBlockBase &N) {
1155 AssertDI(N.getTag() == dwarf::DW_TAG_lexical_block, "invalid tag", &N);
1156 AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1157 "invalid local scope", &N, N.getRawScope());
1158 if (auto *SP = dyn_cast<DISubprogram>(N.getRawScope()))
1159 AssertDI(SP->isDefinition(), "scope points into the type hierarchy", &N);
1162 void Verifier::visitDILexicalBlock(const DILexicalBlock &N) {
1163 visitDILexicalBlockBase(N);
1165 AssertDI(N.getLine() || !N.getColumn(),
1166 "cannot have column info without line info", &N);
1169 void Verifier::visitDILexicalBlockFile(const DILexicalBlockFile &N) {
1170 visitDILexicalBlockBase(N);
1173 void Verifier::visitDICommonBlock(const DICommonBlock &N) {
1174 AssertDI(N.getTag() == dwarf::DW_TAG_common_block, "invalid tag", &N);
1175 if (auto *S = N.getRawScope())
1176 AssertDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1177 if (auto *S = N.getRawDecl())
1178 AssertDI(isa<DIGlobalVariable>(S), "invalid declaration", &N, S);
1181 void Verifier::visitDINamespace(const DINamespace &N) {
1182 AssertDI(N.getTag() == dwarf::DW_TAG_namespace, "invalid tag", &N);
1183 if (auto *S = N.getRawScope())
1184 AssertDI(isa<DIScope>(S), "invalid scope ref", &N, S);
1187 void Verifier::visitDIMacro(const DIMacro &N) {
1188 AssertDI(N.getMacinfoType() == dwarf::DW_MACINFO_define ||
1189 N.getMacinfoType() == dwarf::DW_MACINFO_undef,
1190 "invalid macinfo type", &N);
1191 AssertDI(!N.getName().empty(), "anonymous macro", &N);
1192 if (!N.getValue().empty()) {
1193 assert(N.getValue().data()[0] != ' ' && "Macro value has a space prefix");
1197 void Verifier::visitDIMacroFile(const DIMacroFile &N) {
1198 AssertDI(N.getMacinfoType() == dwarf::DW_MACINFO_start_file,
1199 "invalid macinfo type", &N);
1200 if (auto *F = N.getRawFile())
1201 AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1203 if (auto *Array = N.getRawElements()) {
1204 AssertDI(isa<MDTuple>(Array), "invalid macro list", &N, Array);
1205 for (Metadata *Op : N.getElements()->operands()) {
1206 AssertDI(Op && isa<DIMacroNode>(Op), "invalid macro ref", &N, Op);
1211 void Verifier::visitDIModule(const DIModule &N) {
1212 AssertDI(N.getTag() == dwarf::DW_TAG_module, "invalid tag", &N);
1213 AssertDI(!N.getName().empty(), "anonymous module", &N);
1216 void Verifier::visitDITemplateParameter(const DITemplateParameter &N) {
1217 AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1220 void Verifier::visitDITemplateTypeParameter(const DITemplateTypeParameter &N) {
1221 visitDITemplateParameter(N);
1223 AssertDI(N.getTag() == dwarf::DW_TAG_template_type_parameter, "invalid tag",
1224 &N);
1227 void Verifier::visitDITemplateValueParameter(
1228 const DITemplateValueParameter &N) {
1229 visitDITemplateParameter(N);
1231 AssertDI(N.getTag() == dwarf::DW_TAG_template_value_parameter ||
1232 N.getTag() == dwarf::DW_TAG_GNU_template_template_param ||
1233 N.getTag() == dwarf::DW_TAG_GNU_template_parameter_pack,
1234 "invalid tag", &N);
1237 void Verifier::visitDIVariable(const DIVariable &N) {
1238 if (auto *S = N.getRawScope())
1239 AssertDI(isa<DIScope>(S), "invalid scope", &N, S);
1240 if (auto *F = N.getRawFile())
1241 AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1244 void Verifier::visitDIGlobalVariable(const DIGlobalVariable &N) {
1245 // Checks common to all variables.
1246 visitDIVariable(N);
1248 AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1249 AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1250 AssertDI(N.getType(), "missing global variable type", &N);
1251 if (auto *Member = N.getRawStaticDataMemberDeclaration()) {
1252 AssertDI(isa<DIDerivedType>(Member),
1253 "invalid static data member declaration", &N, Member);
1257 void Verifier::visitDILocalVariable(const DILocalVariable &N) {
1258 // Checks common to all variables.
1259 visitDIVariable(N);
1261 AssertDI(isType(N.getRawType()), "invalid type ref", &N, N.getRawType());
1262 AssertDI(N.getTag() == dwarf::DW_TAG_variable, "invalid tag", &N);
1263 AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1264 "local variable requires a valid scope", &N, N.getRawScope());
1265 if (auto Ty = N.getType())
1266 AssertDI(!isa<DISubroutineType>(Ty), "invalid type", &N, N.getType());
1269 void Verifier::visitDILabel(const DILabel &N) {
1270 if (auto *S = N.getRawScope())
1271 AssertDI(isa<DIScope>(S), "invalid scope", &N, S);
1272 if (auto *F = N.getRawFile())
1273 AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1275 AssertDI(N.getTag() == dwarf::DW_TAG_label, "invalid tag", &N);
1276 AssertDI(N.getRawScope() && isa<DILocalScope>(N.getRawScope()),
1277 "label requires a valid scope", &N, N.getRawScope());
1280 void Verifier::visitDIExpression(const DIExpression &N) {
1281 AssertDI(N.isValid(), "invalid expression", &N);
1284 void Verifier::visitDIGlobalVariableExpression(
1285 const DIGlobalVariableExpression &GVE) {
1286 AssertDI(GVE.getVariable(), "missing variable");
1287 if (auto *Var = GVE.getVariable())
1288 visitDIGlobalVariable(*Var);
1289 if (auto *Expr = GVE.getExpression()) {
1290 visitDIExpression(*Expr);
1291 if (auto Fragment = Expr->getFragmentInfo())
1292 verifyFragmentExpression(*GVE.getVariable(), *Fragment, &GVE);
1296 void Verifier::visitDIObjCProperty(const DIObjCProperty &N) {
1297 AssertDI(N.getTag() == dwarf::DW_TAG_APPLE_property, "invalid tag", &N);
1298 if (auto *T = N.getRawType())
1299 AssertDI(isType(T), "invalid type ref", &N, T);
1300 if (auto *F = N.getRawFile())
1301 AssertDI(isa<DIFile>(F), "invalid file", &N, F);
1304 void Verifier::visitDIImportedEntity(const DIImportedEntity &N) {
1305 AssertDI(N.getTag() == dwarf::DW_TAG_imported_module ||
1306 N.getTag() == dwarf::DW_TAG_imported_declaration,
1307 "invalid tag", &N);
1308 if (auto *S = N.getRawScope())
1309 AssertDI(isa<DIScope>(S), "invalid scope for imported entity", &N, S);
1310 AssertDI(isDINode(N.getRawEntity()), "invalid imported entity", &N,
1311 N.getRawEntity());
1314 void Verifier::visitComdat(const Comdat &C) {
1315 // In COFF the Module is invalid if the GlobalValue has private linkage.
1316 // Entities with private linkage don't have entries in the symbol table.
1317 if (TT.isOSBinFormatCOFF())
1318 if (const GlobalValue *GV = M.getNamedValue(C.getName()))
1319 Assert(!GV->hasPrivateLinkage(),
1320 "comdat global value has private linkage", GV);
1323 void Verifier::visitModuleIdents(const Module &M) {
1324 const NamedMDNode *Idents = M.getNamedMetadata("llvm.ident");
1325 if (!Idents)
1326 return;
328 // llvm.ident takes a list of metadata entries. Each entry has only one string.
1329 // Scan each llvm.ident entry and make sure that this requirement is met.
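// A well-formed example in textual IR (illustrative string contents):
//   !llvm.ident = !{!0}
//   !0 = !{!"compiler identification string"}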
1330 for (const MDNode *N : Idents->operands()) {
1331 Assert(N->getNumOperands() == 1,
1332 "incorrect number of operands in llvm.ident metadata", N);
1333 Assert(dyn_cast_or_null<MDString>(N->getOperand(0)),
1334 ("invalid value for llvm.ident metadata entry operand"
1335 "(the operand should be a string)"),
1336 N->getOperand(0));
1340 void Verifier::visitModuleCommandLines(const Module &M) {
1341 const NamedMDNode *CommandLines = M.getNamedMetadata("llvm.commandline");
1342 if (!CommandLines)
1343 return;
345 // llvm.commandline takes a list of metadata entries. Each entry has only one
1346 // string. Scan each llvm.commandline entry and make sure that this
1347 // requirement is met.
1348 for (const MDNode *N : CommandLines->operands()) {
1349 Assert(N->getNumOperands() == 1,
1350 "incorrect number of operands in llvm.commandline metadata", N);
1351 Assert(dyn_cast_or_null<MDString>(N->getOperand(0)),
1352 ("invalid value for llvm.commandline metadata entry operand"
1353 "(the operand should be a string)"),
1354 N->getOperand(0));
1358 void Verifier::visitModuleFlags(const Module &M) {
1359 const NamedMDNode *Flags = M.getModuleFlagsMetadata();
1360 if (!Flags) return;
1362 // Scan each flag, and track the flags and requirements.
1363 DenseMap<const MDString*, const MDNode*> SeenIDs;
1364 SmallVector<const MDNode*, 16> Requirements;
1365 for (const MDNode *MDN : Flags->operands())
1366 visitModuleFlag(MDN, SeenIDs, Requirements);
1368 // Validate that the requirements in the module are valid.
1369 for (const MDNode *Requirement : Requirements) {
1370 const MDString *Flag = cast<MDString>(Requirement->getOperand(0));
1371 const Metadata *ReqValue = Requirement->getOperand(1);
1373 const MDNode *Op = SeenIDs.lookup(Flag);
1374 if (!Op) {
1375 CheckFailed("invalid requirement on flag, flag is not present in module",
1376 Flag);
1377 continue;
1380 if (Op->getOperand(2) != ReqValue) {
1381 CheckFailed(("invalid requirement on flag, "
1382 "flag does not have the required value"),
1383 Flag);
1384 continue;
1389 void
1390 Verifier::visitModuleFlag(const MDNode *Op,
1391 DenseMap<const MDString *, const MDNode *> &SeenIDs,
1392 SmallVectorImpl<const MDNode *> &Requirements) {
1393 // Each module flag should have three arguments, the merge behavior (a
1394 // constant int), the flag ID (an MDString), and the value.
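// For reference, a well-formed flag in textual IR (illustrative):
//   !llvm.module.flags = !{!0}
//   !0 = !{i32 1, !"wchar_size", i32 4}  ; behavior Error (1), flag ID, value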
1395 Assert(Op->getNumOperands() == 3,
1396 "incorrect number of operands in module flag", Op);
1397 Module::ModFlagBehavior MFB;
1398 if (!Module::isValidModFlagBehavior(Op->getOperand(0), MFB)) {
1399 Assert(
1400 mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(0)),
1401 "invalid behavior operand in module flag (expected constant integer)",
1402 Op->getOperand(0));
1403 Assert(false,
1404 "invalid behavior operand in module flag (unexpected constant)",
1405 Op->getOperand(0));
1407 MDString *ID = dyn_cast_or_null<MDString>(Op->getOperand(1));
1408 Assert(ID, "invalid ID operand in module flag (expected metadata string)",
1409 Op->getOperand(1));
1411 // Sanity check the values for behaviors with additional requirements.
1412 switch (MFB) {
1413 case Module::Error:
1414 case Module::Warning:
1415 case Module::Override:
1416 // These behavior types accept any value.
1417 break;
1419 case Module::Max: {
1420 Assert(mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2)),
1421 "invalid value for 'max' module flag (expected constant integer)",
1422 Op->getOperand(2));
1423 break;
1426 case Module::Require: {
1427 // The value should itself be an MDNode with two operands, a flag ID (an
1428 // MDString), and a value.
1429 MDNode *Value = dyn_cast<MDNode>(Op->getOperand(2));
1430 Assert(Value && Value->getNumOperands() == 2,
1431 "invalid value for 'require' module flag (expected metadata pair)",
1432 Op->getOperand(2));
1433 Assert(isa<MDString>(Value->getOperand(0)),
1434 ("invalid value for 'require' module flag "
1435 "(first value operand should be a string)"),
1436 Value->getOperand(0));
1438 // Append it to the list of requirements, to check once all module flags are
1439 // scanned.
1440 Requirements.push_back(Value);
1441 break;
1444 case Module::Append:
1445 case Module::AppendUnique: {
1446 // These behavior types require the operand be an MDNode.
1447 Assert(isa<MDNode>(Op->getOperand(2)),
1448 "invalid value for 'append'-type module flag "
1449 "(expected a metadata node)",
1450 Op->getOperand(2));
1451 break;
1455 // Unless this is a "requires" flag, check the ID is unique.
1456 if (MFB != Module::Require) {
1457 bool Inserted = SeenIDs.insert(std::make_pair(ID, Op)).second;
1458 Assert(Inserted,
1459 "module flag identifiers must be unique (or of 'require' type)", ID);
1462 if (ID->getString() == "wchar_size") {
1463 ConstantInt *Value
1464 = mdconst::dyn_extract_or_null<ConstantInt>(Op->getOperand(2));
1465 Assert(Value, "wchar_size metadata requires constant integer argument");
1468 if (ID->getString() == "Linker Options") {
1469 // If the llvm.linker.options named metadata exists, we assume that the
1470 // bitcode reader has upgraded the module flag. Otherwise the flag might
1471 // have been created by a client directly.
1472 Assert(M.getNamedMetadata("llvm.linker.options"),
1473 "'Linker Options' named metadata no longer supported");
1476 if (ID->getString() == "CG Profile") {
1477 for (const MDOperand &MDO : cast<MDNode>(Op->getOperand(2))->operands())
1478 visitModuleFlagCGProfileEntry(MDO);
1482 void Verifier::visitModuleFlagCGProfileEntry(const MDOperand &MDO) {
1483 auto CheckFunction = [&](const MDOperand &FuncMDO) {
1484 if (!FuncMDO)
1485 return;
1486 auto F = dyn_cast<ValueAsMetadata>(FuncMDO);
1487 Assert(F && isa<Function>(F->getValue()), "expected a Function or null",
1488 FuncMDO);
1490 auto Node = dyn_cast_or_null<MDNode>(MDO);
1491 Assert(Node && Node->getNumOperands() == 3, "expected an MDNode triple", MDO);
1492 CheckFunction(Node->getOperand(0));
1493 CheckFunction(Node->getOperand(1));
1494 auto Count = dyn_cast_or_null<ConstantAsMetadata>(Node->getOperand(2));
1495 Assert(Count && Count->getType()->isIntegerTy(),
1496 "expected an integer constant", Node->getOperand(2));
1499 /// Return true if this attribute kind only applies to functions.
1500 static bool isFuncOnlyAttr(Attribute::AttrKind Kind) {
1501 switch (Kind) {
1502 case Attribute::NoReturn:
1503 case Attribute::NoSync:
1504 case Attribute::WillReturn:
1505 case Attribute::NoCfCheck:
1506 case Attribute::NoUnwind:
1507 case Attribute::NoInline:
1508 case Attribute::NoFree:
1509 case Attribute::AlwaysInline:
1510 case Attribute::OptimizeForSize:
1511 case Attribute::StackProtect:
1512 case Attribute::StackProtectReq:
1513 case Attribute::StackProtectStrong:
1514 case Attribute::SafeStack:
1515 case Attribute::ShadowCallStack:
1516 case Attribute::NoRedZone:
1517 case Attribute::NoImplicitFloat:
1518 case Attribute::Naked:
1519 case Attribute::InlineHint:
1520 case Attribute::StackAlignment:
1521 case Attribute::UWTable:
1522 case Attribute::NonLazyBind:
1523 case Attribute::ReturnsTwice:
1524 case Attribute::SanitizeAddress:
1525 case Attribute::SanitizeHWAddress:
1526 case Attribute::SanitizeMemTag:
1527 case Attribute::SanitizeThread:
1528 case Attribute::SanitizeMemory:
1529 case Attribute::MinSize:
1530 case Attribute::NoDuplicate:
1531 case Attribute::Builtin:
1532 case Attribute::NoBuiltin:
1533 case Attribute::Cold:
1534 case Attribute::OptForFuzzing:
1535 case Attribute::OptimizeNone:
1536 case Attribute::JumpTable:
1537 case Attribute::Convergent:
1538 case Attribute::ArgMemOnly:
1539 case Attribute::NoRecurse:
1540 case Attribute::InaccessibleMemOnly:
1541 case Attribute::InaccessibleMemOrArgMemOnly:
1542 case Attribute::AllocSize:
1543 case Attribute::SpeculativeLoadHardening:
1544 case Attribute::Speculatable:
1545 case Attribute::StrictFP:
1546 return true;
1547 default:
1548 break;
1550 return false;
1553 /// Return true if this is a function attribute that can also appear on
1554 /// arguments.
1555 static bool isFuncOrArgAttr(Attribute::AttrKind Kind) {
1556 return Kind == Attribute::ReadOnly || Kind == Attribute::WriteOnly ||
1557 Kind == Attribute::ReadNone;
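// For example (illustrative IR, not from this file), 'readonly' is accepted
// both as a function attribute and on a pointer argument:
//   define i32 @f(i32* readonly %p) readonly { ... }
// whereas a function-only attribute such as 'noreturn' on an argument is
// rejected by verifyAttributeTypes below.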
1560 void Verifier::verifyAttributeTypes(AttributeSet Attrs, bool IsFunction,
1561 const Value *V) {
1562 for (Attribute A : Attrs) {
1563 if (A.isStringAttribute())
1564 continue;
1566 if (isFuncOnlyAttr(A.getKindAsEnum())) {
1567 if (!IsFunction) {
1568 CheckFailed("Attribute '" + A.getAsString() +
1569 "' only applies to functions!",
1571 return;
1573 } else if (IsFunction && !isFuncOrArgAttr(A.getKindAsEnum())) {
1574 CheckFailed("Attribute '" + A.getAsString() +
1575 "' does not apply to functions!",
1577 return;
1582 // verifyParameterAttrs - Check the given attributes for an argument or return
1583 // value of the specified type. The value V is printed in error messages.
1584 void Verifier::verifyParameterAttrs(AttributeSet Attrs, Type *Ty,
1585 const Value *V) {
1586 if (!Attrs.hasAttributes())
1587 return;
1589 verifyAttributeTypes(Attrs, /*IsFunction=*/false, V);
1591 if (Attrs.hasAttribute(Attribute::ImmArg)) {
1592 Assert(Attrs.getNumAttributes() == 1,
1593 "Attribute 'immarg' is incompatible with other attributes", V);
1596 // Check for mutually incompatible attributes. Only inreg is compatible with
1597 // sret.
1598 unsigned AttrCount = 0;
1599 AttrCount += Attrs.hasAttribute(Attribute::ByVal);
1600 AttrCount += Attrs.hasAttribute(Attribute::InAlloca);
1601 AttrCount += Attrs.hasAttribute(Attribute::StructRet) ||
1602 Attrs.hasAttribute(Attribute::InReg);
1603 AttrCount += Attrs.hasAttribute(Attribute::Nest);
1604 Assert(AttrCount <= 1, "Attributes 'byval', 'inalloca', 'inreg', 'nest', "
1605 "and 'sret' are incompatible!",
1608 Assert(!(Attrs.hasAttribute(Attribute::InAlloca) &&
1609 Attrs.hasAttribute(Attribute::ReadOnly)),
1610 "Attributes "
1611 "'inalloca and readonly' are incompatible!",
1614 Assert(!(Attrs.hasAttribute(Attribute::StructRet) &&
1615 Attrs.hasAttribute(Attribute::Returned)),
1616 "Attributes "
1617 "'sret and returned' are incompatible!",
1620 Assert(!(Attrs.hasAttribute(Attribute::ZExt) &&
1621 Attrs.hasAttribute(Attribute::SExt)),
1622 "Attributes "
1623 "'zeroext and signext' are incompatible!",
1626 Assert(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1627 Attrs.hasAttribute(Attribute::ReadOnly)),
1628 "Attributes "
1629 "'readnone and readonly' are incompatible!",
1632 Assert(!(Attrs.hasAttribute(Attribute::ReadNone) &&
1633 Attrs.hasAttribute(Attribute::WriteOnly)),
1634 "Attributes "
1635 "'readnone and writeonly' are incompatible!",
1638 Assert(!(Attrs.hasAttribute(Attribute::ReadOnly) &&
1639 Attrs.hasAttribute(Attribute::WriteOnly)),
1640 "Attributes "
1641 "'readonly and writeonly' are incompatible!",
1644 Assert(!(Attrs.hasAttribute(Attribute::NoInline) &&
1645 Attrs.hasAttribute(Attribute::AlwaysInline)),
1646 "Attributes "
1647 "'noinline and alwaysinline' are incompatible!",
1650 if (Attrs.hasAttribute(Attribute::ByVal) && Attrs.getByValType()) {
1651 Assert(Attrs.getByValType() == cast<PointerType>(Ty)->getElementType(),
1652 "Attribute 'byval' type does not match parameter!", V);
1655 AttrBuilder IncompatibleAttrs = AttributeFuncs::typeIncompatible(Ty);
1656 Assert(!AttrBuilder(Attrs).overlaps(IncompatibleAttrs),
1657 "Wrong types for attribute: " +
1658 AttributeSet::get(Context, IncompatibleAttrs).getAsString(),
1661 if (PointerType *PTy = dyn_cast<PointerType>(Ty)) {
1662 SmallPtrSet<Type*, 4> Visited;
1663 if (!PTy->getElementType()->isSized(&Visited)) {
1664 Assert(!Attrs.hasAttribute(Attribute::ByVal) &&
1665 !Attrs.hasAttribute(Attribute::InAlloca),
1666 "Attributes 'byval' and 'inalloca' do not support unsized types!",
1669 if (!isa<PointerType>(PTy->getElementType()))
1670 Assert(!Attrs.hasAttribute(Attribute::SwiftError),
1671 "Attribute 'swifterror' only applies to parameters "
1672 "with pointer to pointer type!",
1674 } else {
1675 Assert(!Attrs.hasAttribute(Attribute::ByVal),
1676 "Attribute 'byval' only applies to parameters with pointer type!",
1678 Assert(!Attrs.hasAttribute(Attribute::SwiftError),
1679 "Attribute 'swifterror' only applies to parameters "
1680 "with pointer type!",
1685 // Check parameter attributes against a function type.
1686 // The value V is printed in error messages.
1687 void Verifier::verifyFunctionAttrs(FunctionType *FT, AttributeList Attrs,
1688 const Value *V, bool IsIntrinsic) {
1689 if (Attrs.isEmpty())
1690 return;
1692 bool SawNest = false;
1693 bool SawReturned = false;
1694 bool SawSRet = false;
1695 bool SawSwiftSelf = false;
1696 bool SawSwiftError = false;
1698 // Verify return value attributes.
1699 AttributeSet RetAttrs = Attrs.getRetAttributes();
1700 Assert((!RetAttrs.hasAttribute(Attribute::ByVal) &&
1701 !RetAttrs.hasAttribute(Attribute::Nest) &&
1702 !RetAttrs.hasAttribute(Attribute::StructRet) &&
1703 !RetAttrs.hasAttribute(Attribute::NoCapture) &&
1704 !RetAttrs.hasAttribute(Attribute::Returned) &&
1705 !RetAttrs.hasAttribute(Attribute::InAlloca) &&
1706 !RetAttrs.hasAttribute(Attribute::SwiftSelf) &&
1707 !RetAttrs.hasAttribute(Attribute::SwiftError)),
1708 "Attributes 'byval', 'inalloca', 'nest', 'sret', 'nocapture', "
1709 "'returned', 'swiftself', and 'swifterror' do not apply to return "
1710 "values!",
1712 Assert((!RetAttrs.hasAttribute(Attribute::ReadOnly) &&
1713 !RetAttrs.hasAttribute(Attribute::WriteOnly) &&
1714 !RetAttrs.hasAttribute(Attribute::ReadNone)),
1715 "Attribute '" + RetAttrs.getAsString() +
1716 "' does not apply to function returns",
1718 verifyParameterAttrs(RetAttrs, FT->getReturnType(), V);
1720 // Verify parameter attributes.
1721 for (unsigned i = 0, e = FT->getNumParams(); i != e; ++i) {
1722 Type *Ty = FT->getParamType(i);
1723 AttributeSet ArgAttrs = Attrs.getParamAttributes(i);
1725 if (!IsIntrinsic) {
1726 Assert(!ArgAttrs.hasAttribute(Attribute::ImmArg),
1727 "immarg attribute only applies to intrinsics",V);
1730 verifyParameterAttrs(ArgAttrs, Ty, V);
1732 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
1733 Assert(!SawNest, "More than one parameter has attribute nest!", V);
1734 SawNest = true;
1737 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
1738 Assert(!SawReturned, "More than one parameter has attribute returned!",
1740 Assert(Ty->canLosslesslyBitCastTo(FT->getReturnType()),
1741 "Incompatible argument and return types for 'returned' attribute",
1743 SawReturned = true;
1746 if (ArgAttrs.hasAttribute(Attribute::StructRet)) {
1747 Assert(!SawSRet, "Cannot have multiple 'sret' parameters!", V);
1748 Assert(i == 0 || i == 1,
1749 "Attribute 'sret' is not on first or second parameter!", V);
1750 SawSRet = true;
1753 if (ArgAttrs.hasAttribute(Attribute::SwiftSelf)) {
1754 Assert(!SawSwiftSelf, "Cannot have multiple 'swiftself' parameters!", V);
1755 SawSwiftSelf = true;
1758 if (ArgAttrs.hasAttribute(Attribute::SwiftError)) {
1759 Assert(!SawSwiftError, "Cannot have multiple 'swifterror' parameters!",
1761 SawSwiftError = true;
1764 if (ArgAttrs.hasAttribute(Attribute::InAlloca)) {
1765 Assert(i == FT->getNumParams() - 1,
1766 "inalloca isn't on the last parameter!", V);
1770 if (!Attrs.hasAttributes(AttributeList::FunctionIndex))
1771 return;
1773 verifyAttributeTypes(Attrs.getFnAttributes(), /*IsFunction=*/true, V);
1775 Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1776 Attrs.hasFnAttribute(Attribute::ReadOnly)),
1777 "Attributes 'readnone and readonly' are incompatible!", V);
1779 Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1780 Attrs.hasFnAttribute(Attribute::WriteOnly)),
1781 "Attributes 'readnone and writeonly' are incompatible!", V);
1783 Assert(!(Attrs.hasFnAttribute(Attribute::ReadOnly) &&
1784 Attrs.hasFnAttribute(Attribute::WriteOnly)),
1785 "Attributes 'readonly and writeonly' are incompatible!", V);
1787 Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1788 Attrs.hasFnAttribute(Attribute::InaccessibleMemOrArgMemOnly)),
1789 "Attributes 'readnone and inaccessiblemem_or_argmemonly' are "
1790 "incompatible!",
1793 Assert(!(Attrs.hasFnAttribute(Attribute::ReadNone) &&
1794 Attrs.hasFnAttribute(Attribute::InaccessibleMemOnly)),
1795 "Attributes 'readnone and inaccessiblememonly' are incompatible!", V);
1797 Assert(!(Attrs.hasFnAttribute(Attribute::NoInline) &&
1798 Attrs.hasFnAttribute(Attribute::AlwaysInline)),
1799 "Attributes 'noinline and alwaysinline' are incompatible!", V);
1801 if (Attrs.hasFnAttribute(Attribute::OptimizeNone)) {
1802 Assert(Attrs.hasFnAttribute(Attribute::NoInline),
1803 "Attribute 'optnone' requires 'noinline'!", V);
1805 Assert(!Attrs.hasFnAttribute(Attribute::OptimizeForSize),
1806 "Attributes 'optsize and optnone' are incompatible!", V);
1808 Assert(!Attrs.hasFnAttribute(Attribute::MinSize),
1809 "Attributes 'minsize and optnone' are incompatible!", V);
1812 if (Attrs.hasFnAttribute(Attribute::JumpTable)) {
1813 const GlobalValue *GV = cast<GlobalValue>(V);
1814 Assert(GV->hasGlobalUnnamedAddr(),
1815 "Attribute 'jumptable' requires 'unnamed_addr'", V);
1818 if (Attrs.hasFnAttribute(Attribute::AllocSize)) {
1819 std::pair<unsigned, Optional<unsigned>> Args =
1820 Attrs.getAllocSizeArgs(AttributeList::FunctionIndex);
1822 auto CheckParam = [&](StringRef Name, unsigned ParamNo) {
1823 if (ParamNo >= FT->getNumParams()) {
1824 CheckFailed("'allocsize' " + Name + " argument is out of bounds", V);
1825 return false;
1828 if (!FT->getParamType(ParamNo)->isIntegerTy()) {
1829 CheckFailed("'allocsize' " + Name +
1830 " argument must refer to an integer parameter",
1832 return false;
1835 return true;
1838 if (!CheckParam("element size", Args.first))
1839 return;
1841 if (Args.second && !CheckParam("number of elements", *Args.second))
1842 return;
1846 void Verifier::verifyFunctionMetadata(
1847 ArrayRef<std::pair<unsigned, MDNode *>> MDs) {
1848 for (const auto &Pair : MDs) {
1849 if (Pair.first == LLVMContext::MD_prof) {
1850 MDNode *MD = Pair.second;
1851 Assert(MD->getNumOperands() >= 2,
1852 "!prof annotations should have no less than 2 operands", MD);
1854 // Check first operand.
1855 Assert(MD->getOperand(0) != nullptr, "first operand should not be null",
1856 MD);
1857 Assert(isa<MDString>(MD->getOperand(0)),
1858 "expected string with name of the !prof annotation", MD);
1859 MDString *MDS = cast<MDString>(MD->getOperand(0));
1860 StringRef ProfName = MDS->getString();
1861 Assert(ProfName.equals("function_entry_count") ||
1862 ProfName.equals("synthetic_function_entry_count"),
1863 "first operand should be 'function_entry_count'"
1864 " or 'synthetic_function_entry_count'",
1865 MD);
1867 // Check second operand.
1868 Assert(MD->getOperand(1) != nullptr, "second operand should not be null",
1869 MD);
1870 Assert(isa<ConstantAsMetadata>(MD->getOperand(1)),
1871 "expected integer argument to function_entry_count", MD);
1876 void Verifier::visitConstantExprsRecursively(const Constant *EntryC) {
1877 if (!ConstantExprVisited.insert(EntryC).second)
1878 return;
1880 SmallVector<const Constant *, 16> Stack;
1881 Stack.push_back(EntryC);
1883 while (!Stack.empty()) {
1884 const Constant *C = Stack.pop_back_val();
1886 // Check this constant expression.
1887 if (const auto *CE = dyn_cast<ConstantExpr>(C))
1888 visitConstantExpr(CE);
1890 if (const auto *GV = dyn_cast<GlobalValue>(C)) {
1891 // Global Values get visited separately, but we do need to make sure
1892 // that the global value is in the correct module
1893 Assert(GV->getParent() == &M, "Referencing global in another module!",
1894 EntryC, &M, GV, GV->getParent());
1895 continue;
1898 // Visit all sub-expressions.
1899 for (const Use &U : C->operands()) {
1900 const auto *OpC = dyn_cast<Constant>(U);
1901 if (!OpC)
1902 continue;
1903 if (!ConstantExprVisited.insert(OpC).second)
1904 continue;
1905 Stack.push_back(OpC);
1910 void Verifier::visitConstantExpr(const ConstantExpr *CE) {
1911 if (CE->getOpcode() == Instruction::BitCast)
1912 Assert(CastInst::castIsValid(Instruction::BitCast, CE->getOperand(0),
1913 CE->getType()),
1914 "Invalid bitcast", CE);
1916 if (CE->getOpcode() == Instruction::IntToPtr ||
1917 CE->getOpcode() == Instruction::PtrToInt) {
1918 auto *PtrTy = CE->getOpcode() == Instruction::IntToPtr
1919 ? CE->getType()
1920 : CE->getOperand(0)->getType();
1921 StringRef Msg = CE->getOpcode() == Instruction::IntToPtr
1922 ? "inttoptr not supported for non-integral pointers"
1923 : "ptrtoint not supported for non-integral pointers";
1924 Assert(
1925 !DL.isNonIntegralPointerType(cast<PointerType>(PtrTy->getScalarType())),
1926 Msg);
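// Worked example for the check below: a function or call site with two
// parameters (Params == 2) may carry at most four attribute sets: one for
// the return value, one per parameter, and one for the function itself.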
1930 bool Verifier::verifyAttributeCount(AttributeList Attrs, unsigned Params) {
1931 // There shouldn't be more attribute sets than there are parameters plus the
1932 // function and return value.
1933 return Attrs.getNumAttrSets() <= Params + 2;
1936 /// Verify that statepoint intrinsic is well formed.
1937 void Verifier::verifyStatepoint(const CallBase &Call) {
1938 assert(Call.getCalledFunction() &&
1939 Call.getCalledFunction()->getIntrinsicID() ==
1940 Intrinsic::experimental_gc_statepoint);
1942 Assert(!Call.doesNotAccessMemory() && !Call.onlyReadsMemory() &&
1943 !Call.onlyAccessesArgMemory(),
1944 "gc.statepoint must read and write all memory to preserve "
1945 "reordering restrictions required by safepoint semantics",
1946 Call);
1948 const int64_t NumPatchBytes =
1949 cast<ConstantInt>(Call.getArgOperand(1))->getSExtValue();
1950 assert(isInt<32>(NumPatchBytes) && "NumPatchBytesV is an i32!");
1951 Assert(NumPatchBytes >= 0,
1952 "gc.statepoint number of patchable bytes must be "
1953 "positive",
1954 Call);
1956 const Value *Target = Call.getArgOperand(2);
1957 auto *PT = dyn_cast<PointerType>(Target->getType());
1958 Assert(PT && PT->getElementType()->isFunctionTy(),
1959 "gc.statepoint callee must be of function pointer type", Call, Target);
1960 FunctionType *TargetFuncType = cast<FunctionType>(PT->getElementType());
1962 const int NumCallArgs = cast<ConstantInt>(Call.getArgOperand(3))->getZExtValue();
1963 Assert(NumCallArgs >= 0,
1964 "gc.statepoint number of arguments to underlying call "
1965 "must be positive",
1966 Call);
1967 const int NumParams = (int)TargetFuncType->getNumParams();
1968 if (TargetFuncType->isVarArg()) {
1969 Assert(NumCallArgs >= NumParams,
1970 "gc.statepoint mismatch in number of vararg call args", Call);
1972 // TODO: Remove this limitation
1973 Assert(TargetFuncType->getReturnType()->isVoidTy(),
1974 "gc.statepoint doesn't support wrapping non-void "
1975 "vararg functions yet",
1976 Call);
1977 } else
1978 Assert(NumCallArgs == NumParams,
1979 "gc.statepoint mismatch in number of call args", Call);
1981 const uint64_t Flags
1982 = cast<ConstantInt>(Call.getArgOperand(4))->getZExtValue();
1983 Assert((Flags & ~(uint64_t)StatepointFlags::MaskAll) == 0,
1984 "unknown flag used in gc.statepoint flags argument", Call);
1986 // Verify that the types of the call parameter arguments match
1987 // the type of the wrapped callee.
1988 AttributeList Attrs = Call.getAttributes();
1989 for (int i = 0; i < NumParams; i++) {
1990 Type *ParamType = TargetFuncType->getParamType(i);
1991 Type *ArgType = Call.getArgOperand(5 + i)->getType();
1992 Assert(ArgType == ParamType,
1993 "gc.statepoint call argument does not match wrapped "
1994 "function type",
1995 Call);
1997 if (TargetFuncType->isVarArg()) {
1998 AttributeSet ArgAttrs = Attrs.getParamAttributes(5 + i);
1999 Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
2000 "Attribute 'sret' cannot be used for vararg call arguments!",
2001 Call);
2005 const int EndCallArgsInx = 4 + NumCallArgs;
2007 const Value *NumTransitionArgsV = Call.getArgOperand(EndCallArgsInx + 1);
2008 Assert(isa<ConstantInt>(NumTransitionArgsV),
2009 "gc.statepoint number of transition arguments "
2010 "must be constant integer",
2011 Call);
2012 const int NumTransitionArgs =
2013 cast<ConstantInt>(NumTransitionArgsV)->getZExtValue();
2014 Assert(NumTransitionArgs >= 0,
2015 "gc.statepoint number of transition arguments must be positive", Call);
2016 const int EndTransitionArgsInx = EndCallArgsInx + 1 + NumTransitionArgs;
2018 const Value *NumDeoptArgsV = Call.getArgOperand(EndTransitionArgsInx + 1);
2019 Assert(isa<ConstantInt>(NumDeoptArgsV),
2020 "gc.statepoint number of deoptimization arguments "
2021 "must be constant integer",
2022 Call);
2023 const int NumDeoptArgs = cast<ConstantInt>(NumDeoptArgsV)->getZExtValue();
2024 Assert(NumDeoptArgs >= 0,
2025 "gc.statepoint number of deoptimization arguments "
2026 "must be positive",
2027 Call);
2029 const int ExpectedNumArgs =
2030 7 + NumCallArgs + NumTransitionArgs + NumDeoptArgs;
2031 Assert(ExpectedNumArgs <= (int)Call.arg_size(),
2032 "gc.statepoint too few arguments according to length fields", Call);
2034 // Check that the only uses of this gc.statepoint are gc.result or
2035 // gc.relocate calls which are tied to this statepoint and thus part
2036 // of the same statepoint sequence
2037 for (const User *U : Call.users()) {
2038 const CallInst *UserCall = dyn_cast<const CallInst>(U);
2039 Assert(UserCall, "illegal use of statepoint token", Call, U);
2040 if (!UserCall)
2041 continue;
2042 Assert(isa<GCRelocateInst>(UserCall) || isa<GCResultInst>(UserCall),
2043 "gc.result or gc.relocate are the only value uses "
2044 "of a gc.statepoint",
2045 Call, U);
2046 if (isa<GCResultInst>(UserCall)) {
2047 Assert(UserCall->getArgOperand(0) == &Call,
2048 "gc.result connected to wrong gc.statepoint", Call, UserCall);
2049 } else if (isa<GCRelocateInst>(UserCall)) {
2050 Assert(UserCall->getArgOperand(0) == &Call,
2051 "gc.relocate connected to wrong gc.statepoint", Call, UserCall);
2055 // Note: It is legal for a single derived pointer to be listed multiple
2056 // times. It's non-optimal, but it is legal. It can also happen after
2057 // insertion if we strip a bitcast away.
2058 // Note: It is really tempting to check that each base is relocated and
2059 // that a derived pointer is never reused as a base pointer. This turns
2060 // out to be problematic since optimizations run after safepoint insertion
2061 // can recognize equality properties that the insertion logic doesn't know
2062 // about. See example statepoint.ll in the verifier subdirectory
2065 void Verifier::verifyFrameRecoverIndices() {
2066 for (auto &Counts : FrameEscapeInfo) {
2067 Function *F = Counts.first;
2068 unsigned EscapedObjectCount = Counts.second.first;
2069 unsigned MaxRecoveredIndex = Counts.second.second;
2070 Assert(MaxRecoveredIndex <= EscapedObjectCount,
2071 "all indices passed to llvm.localrecover must be less than the "
2072 "number of arguments passed to llvm.localescape in the parent "
2073 "function",
2078 static Instruction *getSuccPad(Instruction *Terminator) {
2079 BasicBlock *UnwindDest;
2080 if (auto *II = dyn_cast<InvokeInst>(Terminator))
2081 UnwindDest = II->getUnwindDest();
2082 else if (auto *CSI = dyn_cast<CatchSwitchInst>(Terminator))
2083 UnwindDest = CSI->getUnwindDest();
2084 else
2085 UnwindDest = cast<CleanupReturnInst>(Terminator)->getUnwindDest();
2086 return UnwindDest->getFirstNonPHI();
2089 void Verifier::verifySiblingFuncletUnwinds() {
2090 SmallPtrSet<Instruction *, 8> Visited;
2091 SmallPtrSet<Instruction *, 8> Active;
2092 for (const auto &Pair : SiblingFuncletInfo) {
2093 Instruction *PredPad = Pair.first;
2094 if (Visited.count(PredPad))
2095 continue;
2096 Active.insert(PredPad);
2097 Instruction *Terminator = Pair.second;
2098 do {
2099 Instruction *SuccPad = getSuccPad(Terminator);
2100 if (Active.count(SuccPad)) {
2101 // Found a cycle; report error
2102 Instruction *CyclePad = SuccPad;
2103 SmallVector<Instruction *, 8> CycleNodes;
2104 do {
2105 CycleNodes.push_back(CyclePad);
2106 Instruction *CycleTerminator = SiblingFuncletInfo[CyclePad];
2107 if (CycleTerminator != CyclePad)
2108 CycleNodes.push_back(CycleTerminator);
2109 CyclePad = getSuccPad(CycleTerminator);
2110 } while (CyclePad != SuccPad);
2111 Assert(false, "EH pads can't handle each other's exceptions",
2112 ArrayRef<Instruction *>(CycleNodes));
2114 // Don't re-walk a node we've already checked
2115 if (!Visited.insert(SuccPad).second)
2116 break;
2117 // Walk to this successor if it has a map entry.
2118 PredPad = SuccPad;
2119 auto TermI = SiblingFuncletInfo.find(PredPad);
2120 if (TermI == SiblingFuncletInfo.end())
2121 break;
2122 Terminator = TermI->second;
2123 Active.insert(PredPad);
2124 } while (true);
2125 // Each node only has one successor, so we've walked all the active
2126 // nodes' successors.
2127 Active.clear();
2131 // visitFunction - Verify that a function is ok.
2133 void Verifier::visitFunction(const Function &F) {
2134 visitGlobalValue(F);
2136 // Check function arguments.
2137 FunctionType *FT = F.getFunctionType();
2138 unsigned NumArgs = F.arg_size();
2140 Assert(&Context == &F.getContext(),
2141 "Function context does not match Module context!", &F);
2143 Assert(!F.hasCommonLinkage(), "Functions may not have common linkage", &F);
2144 Assert(FT->getNumParams() == NumArgs,
2145 "# formal arguments must match # of arguments for function type!", &F,
2146 FT);
2147 Assert(F.getReturnType()->isFirstClassType() ||
2148 F.getReturnType()->isVoidTy() || F.getReturnType()->isStructTy(),
2149 "Functions cannot return aggregate values!", &F);
2151 Assert(!F.hasStructRetAttr() || F.getReturnType()->isVoidTy(),
2152 "Invalid struct return type!", &F);
2154 AttributeList Attrs = F.getAttributes();
2156 Assert(verifyAttributeCount(Attrs, FT->getNumParams()),
2157 "Attribute after last parameter!", &F);
2159 bool isLLVMdotName = F.getName().size() >= 5 &&
2160 F.getName().substr(0, 5) == "llvm.";
2162 // Check function attributes.
2163 verifyFunctionAttrs(FT, Attrs, &F, isLLVMdotName);
2165 // On function declarations/definitions, we do not support the builtin
2166 // attribute. We do not check this in verifyFunctionAttrs since that is
2167 // checking for attributes that can/cannot ever be on functions.
2168 Assert(!Attrs.hasFnAttribute(Attribute::Builtin),
2169 "Attribute 'builtin' can only be applied to a callsite.", &F);
2171 // Check that this function meets the restrictions on this calling convention.
2172 // Sometimes varargs is used for perfectly forwarding thunks, so some of these
2173 // restrictions can be lifted.
2174 switch (F.getCallingConv()) {
2175 default:
2176 case CallingConv::C:
2177 break;
2178 case CallingConv::AMDGPU_KERNEL:
2179 case CallingConv::SPIR_KERNEL:
2180 Assert(F.getReturnType()->isVoidTy(),
2181 "Calling convention requires void return type", &F);
2182 LLVM_FALLTHROUGH;
2183 case CallingConv::AMDGPU_VS:
2184 case CallingConv::AMDGPU_HS:
2185 case CallingConv::AMDGPU_GS:
2186 case CallingConv::AMDGPU_PS:
2187 case CallingConv::AMDGPU_CS:
2188 Assert(!F.hasStructRetAttr(),
2189 "Calling convention does not allow sret", &F);
2190 LLVM_FALLTHROUGH;
2191 case CallingConv::Fast:
2192 case CallingConv::Cold:
2193 case CallingConv::Intel_OCL_BI:
2194 case CallingConv::PTX_Kernel:
2195 case CallingConv::PTX_Device:
2196 Assert(!F.isVarArg(), "Calling convention does not support varargs or "
2197 "perfect forwarding!",
2198 &F);
2199 break;
2202 // Check that the argument values match the function type for this function...
2203 unsigned i = 0;
2204 for (const Argument &Arg : F.args()) {
2205 Assert(Arg.getType() == FT->getParamType(i),
2206 "Argument value does not match function argument type!", &Arg,
2207 FT->getParamType(i));
2208 Assert(Arg.getType()->isFirstClassType(),
2209 "Function arguments must have first-class types!", &Arg);
2210 if (!isLLVMdotName) {
2211 Assert(!Arg.getType()->isMetadataTy(),
2212 "Function takes metadata but isn't an intrinsic", &Arg, &F);
2213 Assert(!Arg.getType()->isTokenTy(),
2214 "Function takes token but isn't an intrinsic", &Arg, &F);
2217 // Check that swifterror argument is only used by loads and stores.
2218 if (Attrs.hasParamAttribute(i, Attribute::SwiftError)) {
2219 verifySwiftErrorValue(&Arg);
2221 ++i;
2224 if (!isLLVMdotName)
2225 Assert(!F.getReturnType()->isTokenTy(),
2226 "Functions returns a token but isn't an intrinsic", &F);
2228 // Get the function metadata attachments.
2229 SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
2230 F.getAllMetadata(MDs);
2231 assert(F.hasMetadata() != MDs.empty() && "Bit out-of-sync");
2232 verifyFunctionMetadata(MDs);
2234 // Check validity of the personality function
2235 if (F.hasPersonalityFn()) {
2236 auto *Per = dyn_cast<Function>(F.getPersonalityFn()->stripPointerCasts());
2237 if (Per)
2238 Assert(Per->getParent() == F.getParent(),
2239 "Referencing personality function in another module!",
2240 &F, F.getParent(), Per, Per->getParent());
2243 if (F.isMaterializable()) {
2244 // Function has a body somewhere we can't see.
2245 Assert(MDs.empty(), "unmaterialized function cannot have metadata", &F,
2246 MDs.empty() ? nullptr : MDs.front().second);
2247 } else if (F.isDeclaration()) {
2248 for (const auto &I : MDs) {
2249 // This is used for call site debug information.
2250 AssertDI(I.first != LLVMContext::MD_dbg ||
2251 !cast<DISubprogram>(I.second)->isDistinct(),
2252 "function declaration may only have a unique !dbg attachment",
2253 &F);
2254 Assert(I.first != LLVMContext::MD_prof,
2255 "function declaration may not have a !prof attachment", &F);
2257 // Verify the metadata itself.
2258 visitMDNode(*I.second);
2260 Assert(!F.hasPersonalityFn(),
2261 "Function declaration shouldn't have a personality routine", &F);
2262 } else {
2263 // Verify that this function (which has a body) is not named "llvm.*". It
2264 // is not legal to define intrinsics.
2265 Assert(!isLLVMdotName, "llvm intrinsics cannot be defined!", &F);
2267 // Check the entry node
2268 const BasicBlock *Entry = &F.getEntryBlock();
2269 Assert(pred_empty(Entry),
2270 "Entry block to function must not have predecessors!", Entry);
2272 // The address of the entry block cannot be taken, unless it is dead.
2273 if (Entry->hasAddressTaken()) {
2274 Assert(!BlockAddress::lookup(Entry)->isConstantUsed(),
2275 "blockaddress may not be used with the entry block!", Entry);
2278 unsigned NumDebugAttachments = 0, NumProfAttachments = 0;
2279 // Visit metadata attachments.
2280 for (const auto &I : MDs) {
2281 // Verify that the attachment is legal.
2282 switch (I.first) {
2283 default:
2284 break;
2285 case LLVMContext::MD_dbg: {
2286 ++NumDebugAttachments;
2287 AssertDI(NumDebugAttachments == 1,
2288 "function must have a single !dbg attachment", &F, I.second);
2289 AssertDI(isa<DISubprogram>(I.second),
2290 "function !dbg attachment must be a subprogram", &F, I.second);
2291 auto *SP = cast<DISubprogram>(I.second);
2292 const Function *&AttachedTo = DISubprogramAttachments[SP];
2293 AssertDI(!AttachedTo || AttachedTo == &F,
2294 "DISubprogram attached to more than one function", SP, &F);
2295 AttachedTo = &F;
2296 break;
2298 case LLVMContext::MD_prof:
2299 ++NumProfAttachments;
2300 Assert(NumProfAttachments == 1,
2301 "function must have a single !prof attachment", &F, I.second);
2302 break;
2305 // Verify the metadata itself.
2306 visitMDNode(*I.second);
2310 // If this function is actually an intrinsic, verify that it is only used in
2311 // direct call/invokes, never having its "address taken".
2312 // Only do this if the module is materialized, otherwise we don't have all the
2313 // uses.
2314 if (F.getIntrinsicID() && F.getParent()->isMaterialized()) {
2315 const User *U;
2316 if (F.hasAddressTaken(&U))
2317 Assert(false, "Invalid user of intrinsic instruction!", U);
2320 auto *N = F.getSubprogram();
2321 HasDebugInfo = (N != nullptr);
2322 if (!HasDebugInfo)
2323 return;
2325 // Check that all !dbg attachments lead back to N (or, at least, another
2326 // subprogram that describes the same function).
2328 // FIXME: Check this incrementally while visiting !dbg attachments.
2329 // FIXME: Only check when N is the canonical subprogram for F.
2330 SmallPtrSet<const MDNode *, 32> Seen;
2331 auto VisitDebugLoc = [&](const Instruction &I, const MDNode *Node) {
2332 // Be careful about using DILocation here since we might be dealing with
2333 // broken code (this is the Verifier after all).
2334 const DILocation *DL = dyn_cast_or_null<DILocation>(Node);
2335 if (!DL)
2336 return;
2337 if (!Seen.insert(DL).second)
2338 return;
2340 Metadata *Parent = DL->getRawScope();
2341 AssertDI(Parent && isa<DILocalScope>(Parent),
2342 "DILocation's scope must be a DILocalScope", N, &F, &I, DL,
2343 Parent);
2344 DILocalScope *Scope = DL->getInlinedAtScope();
2345 if (Scope && !Seen.insert(Scope).second)
2346 return;
2348 DISubprogram *SP = Scope ? Scope->getSubprogram() : nullptr;
2350 // Scope and SP could be the same MDNode and we don't want to skip
2351 // validation in that case
2352 if (SP && ((Scope != SP) && !Seen.insert(SP).second))
2353 return;
2355 // FIXME: Once N is canonical, check "SP == &N".
2356 AssertDI(SP->describes(&F),
2357 "!dbg attachment points at wrong subprogram for function", N, &F,
2358 &I, DL, Scope, SP);
2360 for (auto &BB : F)
2361 for (auto &I : BB) {
2362 VisitDebugLoc(I, I.getDebugLoc().getAsMDNode());
2363 // The llvm.loop annotations also contain two DILocations.
2364 if (auto MD = I.getMetadata(LLVMContext::MD_loop))
2365 for (unsigned i = 1; i < MD->getNumOperands(); ++i)
2366 VisitDebugLoc(I, dyn_cast_or_null<MDNode>(MD->getOperand(i)));
2367 if (BrokenDebugInfo)
2368 return;
2372 // visitBasicBlock - Verify that a basic block is well formed...
2374 void Verifier::visitBasicBlock(BasicBlock &BB) {
2375 InstsInThisBlock.clear();
2377 // Ensure that basic blocks have terminators!
2378 Assert(BB.getTerminator(), "Basic Block does not have terminator!", &BB);
2380 // Check constraints that this basic block imposes on all of the PHI nodes in
2381 // it.
2382 if (isa<PHINode>(BB.front())) {
2383 SmallVector<BasicBlock*, 8> Preds(pred_begin(&BB), pred_end(&BB));
2384 SmallVector<std::pair<BasicBlock*, Value*>, 8> Values;
2385 llvm::sort(Preds);
2386 for (const PHINode &PN : BB.phis()) {
2387 // Ensure that PHI nodes have at least one entry!
2388 Assert(PN.getNumIncomingValues() != 0,
2389 "PHI nodes must have at least one entry. If the block is dead, "
2390 "the PHI should be removed!",
2391 &PN);
2392 Assert(PN.getNumIncomingValues() == Preds.size(),
2393 "PHINode should have one entry for each predecessor of its "
2394 "parent basic block!",
2395 &PN);
2397 // Get and sort all incoming values in the PHI node...
2398 Values.clear();
2399 Values.reserve(PN.getNumIncomingValues());
2400 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
2401 Values.push_back(
2402 std::make_pair(PN.getIncomingBlock(i), PN.getIncomingValue(i)));
2403 llvm::sort(Values);
2405 for (unsigned i = 0, e = Values.size(); i != e; ++i) {
2406 // Check to make sure that if there is more than one entry for a
2407 // particular basic block in this PHI node, that the incoming values are
2408 // all identical.
2410 Assert(i == 0 || Values[i].first != Values[i - 1].first ||
2411 Values[i].second == Values[i - 1].second,
2412 "PHI node has multiple entries for the same basic block with "
2413 "different incoming values!",
2414 &PN, Values[i].first, Values[i].second, Values[i - 1].second);
2416 // Check to make sure that the predecessors and PHI node entries are
2417 // matched up.
2418 Assert(Values[i].first == Preds[i],
2419 "PHI node entries do not match predecessors!", &PN,
2420 Values[i].first, Preds[i]);
2425 // Check that all instructions have their parent pointers set up correctly.
2426 for (auto &I : BB)
2428 Assert(I.getParent() == &BB, "Instruction has bogus parent pointer!");
2432 void Verifier::visitTerminator(Instruction &I) {
2433 // Ensure that terminators only exist at the end of the basic block.
2434 Assert(&I == I.getParent()->getTerminator(),
2435 "Terminator found in the middle of a basic block!", I.getParent());
2436 visitInstruction(I);
2439 void Verifier::visitBranchInst(BranchInst &BI) {
2440 if (BI.isConditional()) {
2441 Assert(BI.getCondition()->getType()->isIntegerTy(1),
2442 "Branch condition is not 'i1' type!", &BI, BI.getCondition());
2444 visitTerminator(BI);
2447 void Verifier::visitReturnInst(ReturnInst &RI) {
2448 Function *F = RI.getParent()->getParent();
2449 unsigned N = RI.getNumOperands();
2450 if (F->getReturnType()->isVoidTy())
2451 Assert(N == 0,
2452 "Found return instr that returns non-void in Function of void "
2453 "return type!",
2454 &RI, F->getReturnType());
2455 else
2456 Assert(N == 1 && F->getReturnType() == RI.getOperand(0)->getType(),
2457 "Function return type does not match operand "
2458 "type of return inst!",
2459 &RI, F->getReturnType());
2461 // Check to make sure that the return value has necessary properties for
2462 // terminators...
2463 visitTerminator(RI);
2466 void Verifier::visitSwitchInst(SwitchInst &SI) {
2467 // Check to make sure that all of the constants in the switch instruction
2468 // have the same type as the switched-on value.
2469 Type *SwitchTy = SI.getCondition()->getType();
2470 SmallPtrSet<ConstantInt*, 32> Constants;
2471 for (auto &Case : SI.cases()) {
2472 Assert(Case.getCaseValue()->getType() == SwitchTy,
2473 "Switch constants must all be same type as switch value!", &SI);
2474 Assert(Constants.insert(Case.getCaseValue()).second,
2475 "Duplicate integer as switch case", &SI, Case.getCaseValue());
2478 visitTerminator(SI);
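// For example (illustrative IR), visitSwitchInst above rejects a duplicated
// case value:
//   switch i32 %x, label %default [ i32 0, label %a
//                                   i32 0, label %b ]   ; duplicate '0'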
2481 void Verifier::visitIndirectBrInst(IndirectBrInst &BI) {
2482 Assert(BI.getAddress()->getType()->isPointerTy(),
2483 "Indirectbr operand must have pointer type!", &BI);
2484 for (unsigned i = 0, e = BI.getNumDestinations(); i != e; ++i)
2485 Assert(BI.getDestination(i)->getType()->isLabelTy(),
2486 "Indirectbr destinations must all have pointer type!", &BI);
2488 visitTerminator(BI);
2491 void Verifier::visitCallBrInst(CallBrInst &CBI) {
2492 Assert(CBI.isInlineAsm(), "Callbr is currently only used for asm-goto!",
2493 &CBI);
2494 Assert(CBI.getType()->isVoidTy(), "Callbr return value is not supported!",
2495 &CBI);
2496 for (unsigned i = 0, e = CBI.getNumSuccessors(); i != e; ++i)
2497 Assert(CBI.getSuccessor(i)->getType()->isLabelTy(),
2498 "Callbr successors must all have pointer type!", &CBI);
2499 for (unsigned i = 0, e = CBI.getNumOperands(); i != e; ++i) {
2500 Assert(i >= CBI.getNumArgOperands() || !isa<BasicBlock>(CBI.getOperand(i)),
2501 "Using an unescaped label as a callbr argument!", &CBI);
2502 if (isa<BasicBlock>(CBI.getOperand(i)))
2503 for (unsigned j = i + 1; j != e; ++j)
2504 Assert(CBI.getOperand(i) != CBI.getOperand(j),
2505 "Duplicate callbr destination!", &CBI);
2508 visitTerminator(CBI);
2511 void Verifier::visitSelectInst(SelectInst &SI) {
2512 Assert(!SelectInst::areInvalidOperands(SI.getOperand(0), SI.getOperand(1),
2513 SI.getOperand(2)),
2514 "Invalid operands for select instruction!", &SI);
2516 Assert(SI.getTrueValue()->getType() == SI.getType(),
2517 "Select values must have same type as select instruction!", &SI);
2518 visitInstruction(SI);
2521 /// visitUserOp1 - User-defined operators shouldn't live beyond the lifetime of
2522 /// a pass; if any exist, it's an error.
2524 void Verifier::visitUserOp1(Instruction &I) {
2525 Assert(false, "User-defined operators should not live outside of a pass!", &I);
2528 void Verifier::visitTruncInst(TruncInst &I) {
2529 // Get the source and destination types
2530 Type *SrcTy = I.getOperand(0)->getType();
2531 Type *DestTy = I.getType();
2533 // Get the size of the types in bits, we'll need this later
2534 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2535 unsigned DestBitSize = DestTy->getScalarSizeInBits();
2537 Assert(SrcTy->isIntOrIntVectorTy(), "Trunc only operates on integer", &I);
2538 Assert(DestTy->isIntOrIntVectorTy(), "Trunc only produces integer", &I);
2539 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2540 "trunc source and destination must both be a vector or neither", &I);
2541 Assert(SrcBitSize > DestBitSize, "DestTy too big for Trunc", &I);
2543 visitInstruction(I);
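// E.g. (illustrative): 'trunc i64 %x to i32' is fine, while
// 'trunc i32 %x to i64' or 'trunc <2 x i32> %v to i16' would fail the
// trunc checks above.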
2546 void Verifier::visitZExtInst(ZExtInst &I) {
2547 // Get the source and destination types
2548 Type *SrcTy = I.getOperand(0)->getType();
2549 Type *DestTy = I.getType();
2551 // Get the size of the types in bits, we'll need this later
2552 Assert(SrcTy->isIntOrIntVectorTy(), "ZExt only operates on integer", &I);
2553 Assert(DestTy->isIntOrIntVectorTy(), "ZExt only produces an integer", &I);
2554 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2555 "zext source and destination must both be a vector or neither", &I);
2556 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2557 unsigned DestBitSize = DestTy->getScalarSizeInBits();
2559 Assert(SrcBitSize < DestBitSize, "Type too small for ZExt", &I);
2561 visitInstruction(I);
2564 void Verifier::visitSExtInst(SExtInst &I) {
2565 // Get the source and destination types
2566 Type *SrcTy = I.getOperand(0)->getType();
2567 Type *DestTy = I.getType();
2569 // Get the size of the types in bits, we'll need this later
2570 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2571 unsigned DestBitSize = DestTy->getScalarSizeInBits();
2573 Assert(SrcTy->isIntOrIntVectorTy(), "SExt only operates on integer", &I);
2574 Assert(DestTy->isIntOrIntVectorTy(), "SExt only produces an integer", &I);
2575 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2576 "sext source and destination must both be a vector or neither", &I);
2577 Assert(SrcBitSize < DestBitSize, "Type too small for SExt", &I);
2579 visitInstruction(I);
2582 void Verifier::visitFPTruncInst(FPTruncInst &I) {
2583 // Get the source and destination types
2584 Type *SrcTy = I.getOperand(0)->getType();
2585 Type *DestTy = I.getType();
2586 // Get the size of the types in bits, we'll need this later
2587 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2588 unsigned DestBitSize = DestTy->getScalarSizeInBits();
2590 Assert(SrcTy->isFPOrFPVectorTy(), "FPTrunc only operates on FP", &I);
2591 Assert(DestTy->isFPOrFPVectorTy(), "FPTrunc only produces an FP", &I);
2592 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2593 "fptrunc source and destination must both be a vector or neither", &I);
2594 Assert(SrcBitSize > DestBitSize, "DestTy too big for FPTrunc", &I);
2596 visitInstruction(I);
2599 void Verifier::visitFPExtInst(FPExtInst &I) {
2600 // Get the source and destination types
2601 Type *SrcTy = I.getOperand(0)->getType();
2602 Type *DestTy = I.getType();
2604 // Get the size of the types in bits, we'll need this later
2605 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
2606 unsigned DestBitSize = DestTy->getScalarSizeInBits();
2608 Assert(SrcTy->isFPOrFPVectorTy(), "FPExt only operates on FP", &I);
2609 Assert(DestTy->isFPOrFPVectorTy(), "FPExt only produces an FP", &I);
2610 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(),
2611 "fpext source and destination must both be a vector or neither", &I);
2612 Assert(SrcBitSize < DestBitSize, "DestTy too small for FPExt", &I);
2614 visitInstruction(I);
2617 void Verifier::visitUIToFPInst(UIToFPInst &I) {
2618 // Get the source and destination types
2619 Type *SrcTy = I.getOperand(0)->getType();
2620 Type *DestTy = I.getType();
2622 bool SrcVec = SrcTy->isVectorTy();
2623 bool DstVec = DestTy->isVectorTy();
2625 Assert(SrcVec == DstVec,
2626 "UIToFP source and dest must both be vector or scalar", &I);
2627 Assert(SrcTy->isIntOrIntVectorTy(),
2628 "UIToFP source must be integer or integer vector", &I);
2629 Assert(DestTy->isFPOrFPVectorTy(), "UIToFP result must be FP or FP vector",
2630 &I);
2632 if (SrcVec && DstVec)
2633 Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2634 cast<VectorType>(DestTy)->getNumElements(),
2635 "UIToFP source and dest vector length mismatch", &I);
2637 visitInstruction(I);
2640 void Verifier::visitSIToFPInst(SIToFPInst &I) {
2641 // Get the source and destination types
2642 Type *SrcTy = I.getOperand(0)->getType();
2643 Type *DestTy = I.getType();
2645 bool SrcVec = SrcTy->isVectorTy();
2646 bool DstVec = DestTy->isVectorTy();
2648 Assert(SrcVec == DstVec,
2649 "SIToFP source and dest must both be vector or scalar", &I);
2650 Assert(SrcTy->isIntOrIntVectorTy(),
2651 "SIToFP source must be integer or integer vector", &I);
2652 Assert(DestTy->isFPOrFPVectorTy(), "SIToFP result must be FP or FP vector",
2653 &I);
2655 if (SrcVec && DstVec)
2656 Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2657 cast<VectorType>(DestTy)->getNumElements(),
2658 "SIToFP source and dest vector length mismatch", &I);
2660 visitInstruction(I);
2663 void Verifier::visitFPToUIInst(FPToUIInst &I) {
2664 // Get the source and destination types
2665 Type *SrcTy = I.getOperand(0)->getType();
2666 Type *DestTy = I.getType();
2668 bool SrcVec = SrcTy->isVectorTy();
2669 bool DstVec = DestTy->isVectorTy();
2671 Assert(SrcVec == DstVec,
2672 "FPToUI source and dest must both be vector or scalar", &I);
2673 Assert(SrcTy->isFPOrFPVectorTy(), "FPToUI source must be FP or FP vector",
2674 &I);
2675 Assert(DestTy->isIntOrIntVectorTy(),
2676 "FPToUI result must be integer or integer vector", &I);
2678 if (SrcVec && DstVec)
2679 Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2680 cast<VectorType>(DestTy)->getNumElements(),
2681 "FPToUI source and dest vector length mismatch", &I);
2683 visitInstruction(I);
2686 void Verifier::visitFPToSIInst(FPToSIInst &I) {
2687 // Get the source and destination types
2688 Type *SrcTy = I.getOperand(0)->getType();
2689 Type *DestTy = I.getType();
2691 bool SrcVec = SrcTy->isVectorTy();
2692 bool DstVec = DestTy->isVectorTy();
2694 Assert(SrcVec == DstVec,
2695 "FPToSI source and dest must both be vector or scalar", &I);
2696 Assert(SrcTy->isFPOrFPVectorTy(), "FPToSI source must be FP or FP vector",
2697 &I);
2698 Assert(DestTy->isIntOrIntVectorTy(),
2699 "FPToSI result must be integer or integer vector", &I);
2701 if (SrcVec && DstVec)
2702 Assert(cast<VectorType>(SrcTy)->getNumElements() ==
2703 cast<VectorType>(DestTy)->getNumElements(),
2704 "FPToSI source and dest vector length mismatch", &I);
2706 visitInstruction(I);
2709 void Verifier::visitPtrToIntInst(PtrToIntInst &I) {
2710 // Get the source and destination types
2711 Type *SrcTy = I.getOperand(0)->getType();
2712 Type *DestTy = I.getType();
2714 Assert(SrcTy->isPtrOrPtrVectorTy(), "PtrToInt source must be pointer", &I);
2716 if (auto *PTy = dyn_cast<PointerType>(SrcTy->getScalarType()))
2717 Assert(!DL.isNonIntegralPointerType(PTy),
2718 "ptrtoint not supported for non-integral pointers");
2720 Assert(DestTy->isIntOrIntVectorTy(), "PtrToInt result must be integral", &I);
2721 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "PtrToInt type mismatch",
2722 &I);
2724 if (SrcTy->isVectorTy()) {
2725 VectorType *VSrc = cast<VectorType>(SrcTy);
2726 VectorType *VDest = cast<VectorType>(DestTy);
2727 Assert(VSrc->getNumElements() == VDest->getNumElements(),
2728 "PtrToInt Vector width mismatch", &I);
2731 visitInstruction(I);
2734 void Verifier::visitIntToPtrInst(IntToPtrInst &I) {
2735 // Get the source and destination types
2736 Type *SrcTy = I.getOperand(0)->getType();
2737 Type *DestTy = I.getType();
2739 Assert(SrcTy->isIntOrIntVectorTy(),
2740 "IntToPtr source must be an integral", &I);
2741 Assert(DestTy->isPtrOrPtrVectorTy(), "IntToPtr result must be a pointer", &I);
2743 if (auto *PTy = dyn_cast<PointerType>(DestTy->getScalarType()))
2744 Assert(!DL.isNonIntegralPointerType(PTy),
2745 "inttoptr not supported for non-integral pointers");
2747 Assert(SrcTy->isVectorTy() == DestTy->isVectorTy(), "IntToPtr type mismatch",
2748 &I);
2749 if (SrcTy->isVectorTy()) {
2750 VectorType *VSrc = cast<VectorType>(SrcTy);
2751 VectorType *VDest = cast<VectorType>(DestTy);
2752 Assert(VSrc->getNumElements() == VDest->getNumElements(),
2753 "IntToPtr Vector width mismatch", &I);
2755 visitInstruction(I);
2758 void Verifier::visitBitCastInst(BitCastInst &I) {
2759 Assert(
2760 CastInst::castIsValid(Instruction::BitCast, I.getOperand(0), I.getType()),
2761 "Invalid bitcast", &I);
2762 visitInstruction(I);
2765 void Verifier::visitAddrSpaceCastInst(AddrSpaceCastInst &I) {
2766 Type *SrcTy = I.getOperand(0)->getType();
2767 Type *DestTy = I.getType();
2769 Assert(SrcTy->isPtrOrPtrVectorTy(), "AddrSpaceCast source must be a pointer",
2770 &I);
2771 Assert(DestTy->isPtrOrPtrVectorTy(), "AddrSpaceCast result must be a pointer",
2772 &I);
2773 Assert(SrcTy->getPointerAddressSpace() != DestTy->getPointerAddressSpace(),
2774 "AddrSpaceCast must be between different address spaces", &I);
2775 if (SrcTy->isVectorTy())
2776 Assert(SrcTy->getVectorNumElements() == DestTy->getVectorNumElements(),
2777 "AddrSpaceCast vector pointer number of elements mismatch", &I);
2778 visitInstruction(I);
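// E.g. (illustrative): 'addrspacecast i8* %p to i8 addrspace(1)*' passes the
// checks above; a cast between pointers in the same address space is rejected
// and would normally be written as a bitcast instead.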
2781 /// visitPHINode - Ensure that a PHI node is well formed.
2783 void Verifier::visitPHINode(PHINode &PN) {
2784 // Ensure that the PHI nodes are all grouped together at the top of the block.
2785 // This can be tested by checking whether the instruction before this is
2786 // either nonexistent (because this is begin()) or is a PHI node. If not,
2787 // then there is some other instruction before a PHI.
2788 Assert(&PN == &PN.getParent()->front() ||
2789 isa<PHINode>(--BasicBlock::iterator(&PN)),
2790 "PHI nodes not grouped at top of basic block!", &PN, PN.getParent());
2792 // Check that a PHI doesn't yield a Token.
2793 Assert(!PN.getType()->isTokenTy(), "PHI nodes cannot have token type!");
2795 // Check that all of the values of the PHI node have the same type as the
2796 // result, and that the incoming blocks are really basic blocks.
2797 for (Value *IncValue : PN.incoming_values()) {
2798 Assert(PN.getType() == IncValue->getType(),
2799 "PHI node operands are not the same type as the result!", &PN);
2802 // All other PHI node constraints are checked in the visitBasicBlock method.
2804 visitInstruction(PN);
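// A well-formed PHI (illustrative) carries the result type on every incoming
// value, e.g.
//   %v = phi i32 [ 0, %entry ], [ %next, %loop ]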
2807 void Verifier::visitCallBase(CallBase &Call) {
2808 Assert(Call.getCalledValue()->getType()->isPointerTy(),
2809 "Called function must be a pointer!", Call);
2810 PointerType *FPTy = cast<PointerType>(Call.getCalledValue()->getType());
2812 Assert(FPTy->getElementType()->isFunctionTy(),
2813 "Called function is not pointer to function type!", Call);
2815 Assert(FPTy->getElementType() == Call.getFunctionType(),
2816 "Called function is not the same type as the call!", Call);
2818 FunctionType *FTy = Call.getFunctionType();
2820 // Verify that the correct number of arguments are being passed
2821 if (FTy->isVarArg())
2822 Assert(Call.arg_size() >= FTy->getNumParams(),
2823 "Called function requires more parameters than were provided!",
2824 Call);
2825 else
2826 Assert(Call.arg_size() == FTy->getNumParams(),
2827 "Incorrect number of arguments passed to called function!", Call);
2829 // Verify that all arguments to the call match the function type.
2830 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i)
2831 Assert(Call.getArgOperand(i)->getType() == FTy->getParamType(i),
2832 "Call parameter type does not match function signature!",
2833 Call.getArgOperand(i), FTy->getParamType(i), Call);
2835 AttributeList Attrs = Call.getAttributes();
2837 Assert(verifyAttributeCount(Attrs, Call.arg_size()),
2838 "Attribute after last parameter!", Call);
2840 bool IsIntrinsic = Call.getCalledFunction() &&
2841 Call.getCalledFunction()->getName().startswith("llvm.");
2843 Function *Callee
2844 = dyn_cast<Function>(Call.getCalledValue()->stripPointerCasts());
2846 if (Attrs.hasAttribute(AttributeList::FunctionIndex, Attribute::Speculatable)) {
2847 // Don't allow speculatable on call sites, unless the underlying function
2848 // declaration is also speculatable.
2849 Assert(Callee && Callee->isSpeculatable(),
2850 "speculatable attribute may not apply to call sites", Call);
2853 // Verify call attributes.
2854 verifyFunctionAttrs(FTy, Attrs, &Call, IsIntrinsic);
2856 // Conservatively check the inalloca argument.
2857 // We have a bug if we can find that there is an underlying alloca without
2858 // inalloca.
2859 if (Call.hasInAllocaArgument()) {
2860 Value *InAllocaArg = Call.getArgOperand(FTy->getNumParams() - 1);
2861 if (auto AI = dyn_cast<AllocaInst>(InAllocaArg->stripInBoundsOffsets()))
2862 Assert(AI->isUsedWithInAlloca(),
2863 "inalloca argument for call has mismatched alloca", AI, Call);
2866 // For each argument of the callsite, if it has the swifterror argument,
2867 // make sure the underlying alloca/parameter it comes from has a swifterror as
2868 // well.
2869 for (unsigned i = 0, e = FTy->getNumParams(); i != e; ++i) {
2870 if (Call.paramHasAttr(i, Attribute::SwiftError)) {
2871 Value *SwiftErrorArg = Call.getArgOperand(i);
2872 if (auto AI = dyn_cast<AllocaInst>(SwiftErrorArg->stripInBoundsOffsets())) {
2873 Assert(AI->isSwiftError(),
2874 "swifterror argument for call has mismatched alloca", AI, Call);
2875 continue;
2877 auto ArgI = dyn_cast<Argument>(SwiftErrorArg);
2878 Assert(ArgI,
2879 "swifterror argument should come from an alloca or parameter",
2880 SwiftErrorArg, Call);
2881 Assert(ArgI->hasSwiftErrorAttr(),
2882 "swifterror argument for call has mismatched parameter", ArgI,
2883 Call);
2886 if (Attrs.hasParamAttribute(i, Attribute::ImmArg)) {
2887 // Don't allow immarg on call sites, unless the underlying declaration
2888 // also has the matching immarg.
2889 Assert(Callee && Callee->hasParamAttribute(i, Attribute::ImmArg),
2890 "immarg may not apply only to call sites",
2891 Call.getArgOperand(i), Call);
2894 if (Call.paramHasAttr(i, Attribute::ImmArg)) {
2895 Value *ArgVal = Call.getArgOperand(i);
2896 Assert(isa<ConstantInt>(ArgVal) || isa<ConstantFP>(ArgVal),
2897 "immarg operand has non-immediate parameter", ArgVal, Call);
2901 if (FTy->isVarArg()) {
2902 // FIXME? is 'nest' even legal here?
2903 bool SawNest = false;
2904 bool SawReturned = false;
2906 for (unsigned Idx = 0; Idx < FTy->getNumParams(); ++Idx) {
2907 if (Attrs.hasParamAttribute(Idx, Attribute::Nest))
2908 SawNest = true;
2909 if (Attrs.hasParamAttribute(Idx, Attribute::Returned))
2910 SawReturned = true;
2913 // Check attributes on the varargs part.
2914 for (unsigned Idx = FTy->getNumParams(); Idx < Call.arg_size(); ++Idx) {
2915 Type *Ty = Call.getArgOperand(Idx)->getType();
2916 AttributeSet ArgAttrs = Attrs.getParamAttributes(Idx);
2917 verifyParameterAttrs(ArgAttrs, Ty, &Call);
2919 if (ArgAttrs.hasAttribute(Attribute::Nest)) {
2920 Assert(!SawNest, "More than one parameter has attribute nest!", Call);
2921 SawNest = true;
2924 if (ArgAttrs.hasAttribute(Attribute::Returned)) {
2925 Assert(!SawReturned, "More than one parameter has attribute returned!",
2926 Call);
2927 Assert(Ty->canLosslesslyBitCastTo(FTy->getReturnType()),
2928 "Incompatible argument and return types for 'returned' "
2929 "attribute",
2930 Call);
2931 SawReturned = true;
2934 // Statepoint intrinsic is vararg but the wrapped function may be not.
2935 // Allow sret here and check the wrapped function in verifyStatepoint.
2936 if (!Call.getCalledFunction() ||
2937 Call.getCalledFunction()->getIntrinsicID() !=
2938 Intrinsic::experimental_gc_statepoint)
2939 Assert(!ArgAttrs.hasAttribute(Attribute::StructRet),
2940 "Attribute 'sret' cannot be used for vararg call arguments!",
2941 Call);
2943 if (ArgAttrs.hasAttribute(Attribute::InAlloca))
2944 Assert(Idx == Call.arg_size() - 1,
2945 "inalloca isn't on the last argument!", Call);
2949 // Verify that there's no metadata unless it's a direct call to an intrinsic.
2950 if (!IsIntrinsic) {
2951 for (Type *ParamTy : FTy->params()) {
2952 Assert(!ParamTy->isMetadataTy(),
2953 "Function has metadata parameter but isn't an intrinsic", Call);
2954 Assert(!ParamTy->isTokenTy(),
2955 "Function has token parameter but isn't an intrinsic", Call);
2959 // Verify that indirect calls don't return tokens.
2960 if (!Call.getCalledFunction())
2961 Assert(!FTy->getReturnType()->isTokenTy(),
2962 "Return type cannot be token for indirect call!");
2964 if (Function *F = Call.getCalledFunction())
2965 if (Intrinsic::ID ID = (Intrinsic::ID)F->getIntrinsicID())
2966 visitIntrinsicCall(ID, Call);
2968 // Verify that a callsite has at most one "deopt", at most one "funclet" and
2969 // at most one "gc-transition" operand bundle.
2970 bool FoundDeoptBundle = false, FoundFuncletBundle = false,
2971 FoundGCTransitionBundle = false;
2972 for (unsigned i = 0, e = Call.getNumOperandBundles(); i < e; ++i) {
2973 OperandBundleUse BU = Call.getOperandBundleAt(i);
2974 uint32_t Tag = BU.getTagID();
2975 if (Tag == LLVMContext::OB_deopt) {
2976 Assert(!FoundDeoptBundle, "Multiple deopt operand bundles", Call);
2977 FoundDeoptBundle = true;
2978 } else if (Tag == LLVMContext::OB_gc_transition) {
2979 Assert(!FoundGCTransitionBundle, "Multiple gc-transition operand bundles",
2980 Call);
2981 FoundGCTransitionBundle = true;
2982 } else if (Tag == LLVMContext::OB_funclet) {
2983 Assert(!FoundFuncletBundle, "Multiple funclet operand bundles", Call);
2984 FoundFuncletBundle = true;
2985 Assert(BU.Inputs.size() == 1,
2986 "Expected exactly one funclet bundle operand", Call);
2987 Assert(isa<FuncletPadInst>(BU.Inputs.front()),
2988 "Funclet bundle operands should correspond to a FuncletPadInst",
2989 Call);
2993 // Verify that each inlinable callsite of a debug-info-bearing function in a
2994 // debug-info-bearing function has a debug location attached to it. Failure to
2995 // do so causes assertion failures when the inliner sets up inline scope info.
2996 if (Call.getFunction()->getSubprogram() && Call.getCalledFunction() &&
2997 Call.getCalledFunction()->getSubprogram())
2998 AssertDI(Call.getDebugLoc(),
2999 "inlinable function call in a function with "
3000 "debug info must have a !dbg location",
3001 Call);
3003 visitInstruction(Call);
3006 /// Two types are "congruent" if they are identical, or if they are both pointer
3007 /// types with different pointee types and the same address space.
3008 static bool isTypeCongruent(Type *L, Type *R) {
3009 if (L == R)
3010 return true;
3011 PointerType *PL = dyn_cast<PointerType>(L);
3012 PointerType *PR = dyn_cast<PointerType>(R);
3013 if (!PL || !PR)
3014 return false;
3015 return PL->getAddressSpace() == PR->getAddressSpace();
3018 static AttrBuilder getParameterABIAttributes(int I, AttributeList Attrs) {
3019 static const Attribute::AttrKind ABIAttrs[] = {
3020 Attribute::StructRet, Attribute::ByVal, Attribute::InAlloca,
3021 Attribute::InReg, Attribute::Returned, Attribute::SwiftSelf,
3022 Attribute::SwiftError};
3023 AttrBuilder Copy;
3024 for (auto AK : ABIAttrs) {
3025 if (Attrs.hasParamAttribute(I, AK))
3026 Copy.addAttribute(AK);
3028 if (Attrs.hasParamAttribute(I, Attribute::Alignment))
3029 Copy.addAlignmentAttr(Attrs.getParamAlignment(I));
3030 return Copy;
3033 void Verifier::verifyMustTailCall(CallInst &CI) {
3034 Assert(!CI.isInlineAsm(), "cannot use musttail call with inline asm", &CI);
3036 // - The caller and callee prototypes must match. Pointer types of
3037 // parameters or return types may differ in pointee type, but not
3038 // address space.
3039 Function *F = CI.getParent()->getParent();
3040 FunctionType *CallerTy = F->getFunctionType();
3041 FunctionType *CalleeTy = CI.getFunctionType();
3042 if (!CI.getCalledFunction() || !CI.getCalledFunction()->isIntrinsic()) {
3043 Assert(CallerTy->getNumParams() == CalleeTy->getNumParams(),
3044 "cannot guarantee tail call due to mismatched parameter counts",
3045 &CI);
3046 for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3047 Assert(
3048 isTypeCongruent(CallerTy->getParamType(I), CalleeTy->getParamType(I)),
3049 "cannot guarantee tail call due to mismatched parameter types", &CI);
3052 Assert(CallerTy->isVarArg() == CalleeTy->isVarArg(),
3053 "cannot guarantee tail call due to mismatched varargs", &CI);
3054 Assert(isTypeCongruent(CallerTy->getReturnType(), CalleeTy->getReturnType()),
3055 "cannot guarantee tail call due to mismatched return types", &CI);
3057 // - The calling conventions of the caller and callee must match.
3058 Assert(F->getCallingConv() == CI.getCallingConv(),
3059 "cannot guarantee tail call due to mismatched calling conv", &CI);
3061 // - All ABI-impacting function attributes, such as sret, byval, inreg,
3062 // returned, and inalloca, must match.
3063 AttributeList CallerAttrs = F->getAttributes();
3064 AttributeList CalleeAttrs = CI.getAttributes();
3065 for (int I = 0, E = CallerTy->getNumParams(); I != E; ++I) {
3066 AttrBuilder CallerABIAttrs = getParameterABIAttributes(I, CallerAttrs);
3067 AttrBuilder CalleeABIAttrs = getParameterABIAttributes(I, CalleeAttrs);
3068 Assert(CallerABIAttrs == CalleeABIAttrs,
3069 "cannot guarantee tail call due to mismatched ABI impacting "
3070 "function attributes",
3071 &CI, CI.getOperand(I));
3074 // - The call must immediately precede a :ref:`ret <i_ret>` instruction,
3075 // or a pointer bitcast followed by a ret instruction.
3076 // - The ret instruction must return the (possibly bitcasted) value
3077 // produced by the call or void.
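// For illustration, assuming a caller declared to return i8*, a well-formed
// musttail sequence looks roughly like:
//   %v = musttail call i32* @callee(i32* %p)
//   %r = bitcast i32* %v to i8*
//   ret i8* %r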
3078 Value *RetVal = &CI;
3079 Instruction *Next = CI.getNextNode();
3081 // Handle the optional bitcast.
3082 if (BitCastInst *BI = dyn_cast_or_null<BitCastInst>(Next)) {
3083 Assert(BI->getOperand(0) == RetVal,
3084 "bitcast following musttail call must use the call", BI);
3085 RetVal = BI;
3086 Next = BI->getNextNode();
3089 // Check the return.
3090 ReturnInst *Ret = dyn_cast_or_null<ReturnInst>(Next);
3091 Assert(Ret, "musttail call must precede a ret with an optional bitcast",
3092 &CI);
3093 Assert(!Ret->getReturnValue() || Ret->getReturnValue() == RetVal,
3094 "musttail call result must be returned", Ret);
3097 void Verifier::visitCallInst(CallInst &CI) {
3098 visitCallBase(CI);
3100 if (CI.isMustTailCall())
3101 verifyMustTailCall(CI);
3104 void Verifier::visitInvokeInst(InvokeInst &II) {
3105 visitCallBase(II);
3107 // Verify that the first non-PHI instruction of the unwind destination is an
3108 // exception handling instruction.
3109 Assert(
3110 II.getUnwindDest()->isEHPad(),
3111 "The unwind destination does not have an exception handling instruction!",
3112 &II);
3114 visitTerminator(II);
3117 /// visitUnaryOperator - Check the argument to the unary operator.
3119 void Verifier::visitUnaryOperator(UnaryOperator &U) {
3120 Assert(U.getType() == U.getOperand(0)->getType(),
3121 "Unary operators must have same type for"
3122 "operands and result!",
3123 &U);
3125 switch (U.getOpcode()) {
3126 // Check that floating-point arithmetic operators are only used with
3127 // floating-point operands.
3128 case Instruction::FNeg:
3129 Assert(U.getType()->isFPOrFPVectorTy(),
3130 "FNeg operator only works with float types!", &U);
3131 break;
3132 default:
3133 llvm_unreachable("Unknown UnaryOperator opcode!");
3136 visitInstruction(U);
3139 /// visitBinaryOperator - Check that both arguments to the binary operator are
3140 /// of the same type!
3142 void Verifier::visitBinaryOperator(BinaryOperator &B) {
3143 Assert(B.getOperand(0)->getType() == B.getOperand(1)->getType(),
3144 "Both operands to a binary operator are not of the same type!", &B);
3146 switch (B.getOpcode()) {
3147 // Check that integer arithmetic operators are only used with
3148 // integral operands.
3149 case Instruction::Add:
3150 case Instruction::Sub:
3151 case Instruction::Mul:
3152 case Instruction::SDiv:
3153 case Instruction::UDiv:
3154 case Instruction::SRem:
3155 case Instruction::URem:
3156 Assert(B.getType()->isIntOrIntVectorTy(),
3157 "Integer arithmetic operators only work with integral types!", &B);
3158 Assert(B.getType() == B.getOperand(0)->getType(),
3159 "Integer arithmetic operators must have same type "
3160 "for operands and result!",
3161 &B);
3162 break;
3163 // Check that floating-point arithmetic operators are only used with
3164 // floating-point operands.
3165 case Instruction::FAdd:
3166 case Instruction::FSub:
3167 case Instruction::FMul:
3168 case Instruction::FDiv:
3169 case Instruction::FRem:
3170 Assert(B.getType()->isFPOrFPVectorTy(),
3171 "Floating-point arithmetic operators only work with "
3172 "floating-point types!",
3173 &B);
3174 Assert(B.getType() == B.getOperand(0)->getType(),
3175 "Floating-point arithmetic operators must have same type "
3176 "for operands and result!",
3177 &B);
3178 break;
3179 // Check that logical operators are only used with integral operands.
3180 case Instruction::And:
3181 case Instruction::Or:
3182 case Instruction::Xor:
3183 Assert(B.getType()->isIntOrIntVectorTy(),
3184 "Logical operators only work with integral types!", &B);
3185 Assert(B.getType() == B.getOperand(0)->getType(),
3186 "Logical operators must have same type for operands and result!",
3187 &B);
3188 break;
3189 case Instruction::Shl:
3190 case Instruction::LShr:
3191 case Instruction::AShr:
3192 Assert(B.getType()->isIntOrIntVectorTy(),
3193 "Shifts only work with integral types!", &B);
3194 Assert(B.getType() == B.getOperand(0)->getType(),
3195 "Shift return type must be same as operands!", &B);
3196 break;
3197 default:
3198 llvm_unreachable("Unknown BinaryOperator opcode!");
3201 visitInstruction(B);
3204 void Verifier::visitICmpInst(ICmpInst &IC) {
3205 // Check that the operands are the same type
3206 Type *Op0Ty = IC.getOperand(0)->getType();
3207 Type *Op1Ty = IC.getOperand(1)->getType();
3208 Assert(Op0Ty == Op1Ty,
3209 "Both operands to ICmp instruction are not of the same type!", &IC);
3210 // Check that the operands are the right type
3211 Assert(Op0Ty->isIntOrIntVectorTy() || Op0Ty->isPtrOrPtrVectorTy(),
3212 "Invalid operand types for ICmp instruction", &IC);
3213 // Check that the predicate is valid.
3214 Assert(IC.isIntPredicate(),
3215 "Invalid predicate in ICmp instruction!", &IC);
3217 visitInstruction(IC);
3220 void Verifier::visitFCmpInst(FCmpInst &FC) {
3221 // Check that the operands are the same type
3222 Type *Op0Ty = FC.getOperand(0)->getType();
3223 Type *Op1Ty = FC.getOperand(1)->getType();
3224 Assert(Op0Ty == Op1Ty,
3225 "Both operands to FCmp instruction are not of the same type!", &FC);
3226 // Check that the operands are the right type
3227 Assert(Op0Ty->isFPOrFPVectorTy(),
3228 "Invalid operand types for FCmp instruction", &FC);
3229 // Check that the predicate is valid.
3230 Assert(FC.isFPPredicate(),
3231 "Invalid predicate in FCmp instruction!", &FC);
3233 visitInstruction(FC);
3236 void Verifier::visitExtractElementInst(ExtractElementInst &EI) {
3237 Assert(
3238 ExtractElementInst::isValidOperands(EI.getOperand(0), EI.getOperand(1)),
3239 "Invalid extractelement operands!", &EI);
3240 visitInstruction(EI);
3243 void Verifier::visitInsertElementInst(InsertElementInst &IE) {
3244 Assert(InsertElementInst::isValidOperands(IE.getOperand(0), IE.getOperand(1),
3245 IE.getOperand(2)),
3246 "Invalid insertelement operands!", &IE);
3247 visitInstruction(IE);
3250 void Verifier::visitShuffleVectorInst(ShuffleVectorInst &SV) {
3251 Assert(ShuffleVectorInst::isValidOperands(SV.getOperand(0), SV.getOperand(1),
3252 SV.getOperand(2)),
3253 "Invalid shufflevector operands!", &SV);
3254 visitInstruction(SV);
3257 void Verifier::visitGetElementPtrInst(GetElementPtrInst &GEP) {
3258 Type *TargetTy = GEP.getPointerOperandType()->getScalarType();
3260 Assert(isa<PointerType>(TargetTy),
3261 "GEP base pointer is not a vector or a vector of pointers", &GEP);
3262 Assert(GEP.getSourceElementType()->isSized(), "GEP into unsized type!", &GEP);
3264 SmallVector<Value*, 16> Idxs(GEP.idx_begin(), GEP.idx_end());
3265 Assert(all_of(
3266 Idxs, [](Value* V) { return V->getType()->isIntOrIntVectorTy(); }),
3267 "GEP indexes must be integers", &GEP);
3268 Type *ElTy =
3269 GetElementPtrInst::getIndexedType(GEP.getSourceElementType(), Idxs);
3270 Assert(ElTy, "Invalid indices for GEP pointer type!", &GEP);
3272 Assert(GEP.getType()->isPtrOrPtrVectorTy() &&
3273 GEP.getResultElementType() == ElTy,
3274 "GEP is not of right type for indices!", &GEP, ElTy);
3276 if (GEP.getType()->isVectorTy()) {
3277 // Additional checks for vector GEPs.
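// For example, in
//   %g = getelementptr i32, <4 x i32*> %ptrs, <4 x i64> %idxs
// the result is <4 x i32*> and every vector operand must be 4 elements wide.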
3278 unsigned GEPWidth = GEP.getType()->getVectorNumElements();
3279 if (GEP.getPointerOperandType()->isVectorTy())
3280 Assert(GEPWidth == GEP.getPointerOperandType()->getVectorNumElements(),
3281 "Vector GEP result width doesn't match operand's", &GEP);
3282 for (Value *Idx : Idxs) {
3283 Type *IndexTy = Idx->getType();
3284 if (IndexTy->isVectorTy()) {
3285 unsigned IndexWidth = IndexTy->getVectorNumElements();
3286 Assert(IndexWidth == GEPWidth, "Invalid GEP index vector width", &GEP);
3288 Assert(IndexTy->isIntOrIntVectorTy(),
3289 "All GEP indices should be of integer type");
3293 if (auto *PTy = dyn_cast<PointerType>(GEP.getType())) {
3294 Assert(GEP.getAddressSpace() == PTy->getAddressSpace(),
3295 "GEP address space doesn't match type", &GEP);
3298 visitInstruction(GEP);
3301 static bool isContiguous(const ConstantRange &A, const ConstantRange &B) {
3302 return A.getUpper() == B.getLower() || A.getLower() == B.getUpper();
3305 void Verifier::visitRangeMetadata(Instruction &I, MDNode *Range, Type *Ty) {
3306 assert(Range && Range == I.getMetadata(LLVMContext::MD_range) &&
3307 "precondition violation");
3309 unsigned NumOperands = Range->getNumOperands();
3310 Assert(NumOperands % 2 == 0, "Unfinished range!", Range);
3311 unsigned NumRanges = NumOperands / 2;
3312 Assert(NumRanges >= 1, "It should have at least one range!", Range);
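// For example, the following attachment encodes the two ranges [0, 10) and
// [32, 64) for an i8 load:
//   %v = load i8, i8* %p, !range !0
//   !0 = !{i8 0, i8 10, i8 32, i8 64}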
3314 ConstantRange LastRange(1, true); // Dummy initial value
3315 for (unsigned i = 0; i < NumRanges; ++i) {
3316 ConstantInt *Low =
3317 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i));
3318 Assert(Low, "The lower limit must be an integer!", Low);
3319 ConstantInt *High =
3320 mdconst::dyn_extract<ConstantInt>(Range->getOperand(2 * i + 1));
3321 Assert(High, "The upper limit must be an integer!", High);
3322 Assert(High->getType() == Low->getType() && High->getType() == Ty,
3323 "Range types must match instruction type!", &I);
3325 APInt HighV = High->getValue();
3326 APInt LowV = Low->getValue();
3327 ConstantRange CurRange(LowV, HighV);
3328 Assert(!CurRange.isEmptySet() && !CurRange.isFullSet(),
3329 "Range must not be empty!", Range);
3330 if (i != 0) {
3331 Assert(CurRange.intersectWith(LastRange).isEmptySet(),
3332 "Intervals are overlapping", Range);
3333 Assert(LowV.sgt(LastRange.getLower()), "Intervals are not in order",
3334 Range);
3335 Assert(!isContiguous(CurRange, LastRange), "Intervals are contiguous",
3336 Range);
3338 LastRange = ConstantRange(LowV, HighV);
3340 if (NumRanges > 2) {
3341 APInt FirstLow =
3342 mdconst::dyn_extract<ConstantInt>(Range->getOperand(0))->getValue();
3343 APInt FirstHigh =
3344 mdconst::dyn_extract<ConstantInt>(Range->getOperand(1))->getValue();
3345 ConstantRange FirstRange(FirstLow, FirstHigh);
3346 Assert(FirstRange.intersectWith(LastRange).isEmptySet(),
3347 "Intervals are overlapping", Range);
3348 Assert(!isContiguous(FirstRange, LastRange), "Intervals are contiguous",
3349 Range);
3353 void Verifier::checkAtomicMemAccessSize(Type *Ty, const Instruction *I) {
3354 unsigned Size = DL.getTypeSizeInBits(Ty);
3355 Assert(Size >= 8, "atomic memory access' size must be byte-sized", Ty, I);
3356 Assert(!(Size & (Size - 1)),
3357 "atomic memory access' operand must have a power-of-two size", Ty, I);
3360 void Verifier::visitLoadInst(LoadInst &LI) {
3361 PointerType *PTy = dyn_cast<PointerType>(LI.getOperand(0)->getType());
3362 Assert(PTy, "Load operand must be a pointer.", &LI);
3363 Type *ElTy = LI.getType();
3364 Assert(LI.getAlignment() <= Value::MaximumAlignment,
3365 "huge alignment values are unsupported", &LI);
3366 Assert(ElTy->isSized(), "loading unsized types is not allowed", &LI);
3367 if (LI.isAtomic()) {
3368 Assert(LI.getOrdering() != AtomicOrdering::Release &&
3369 LI.getOrdering() != AtomicOrdering::AcquireRelease,
3370 "Load cannot have Release ordering", &LI);
3371 Assert(LI.getAlignment() != 0,
3372 "Atomic load must specify explicit alignment", &LI);
3373 Assert(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
3374 "atomic load operand must have integer, pointer, or floating point "
3375 "type!",
3376 ElTy, &LI);
3377 checkAtomicMemAccessSize(ElTy, &LI);
3378 } else {
3379 Assert(LI.getSyncScopeID() == SyncScope::System,
3380 "Non-atomic load cannot have SynchronizationScope specified", &LI);
3383 visitInstruction(LI);
3386 void Verifier::visitStoreInst(StoreInst &SI) {
3387 PointerType *PTy = dyn_cast<PointerType>(SI.getOperand(1)->getType());
3388 Assert(PTy, "Store operand must be a pointer.", &SI);
3389 Type *ElTy = PTy->getElementType();
3390 Assert(ElTy == SI.getOperand(0)->getType(),
3391 "Stored value type does not match pointer operand type!", &SI, ElTy);
3392 Assert(SI.getAlignment() <= Value::MaximumAlignment,
3393 "huge alignment values are unsupported", &SI);
3394 Assert(ElTy->isSized(), "storing unsized types is not allowed", &SI);
3395 if (SI.isAtomic()) {
3396 Assert(SI.getOrdering() != AtomicOrdering::Acquire &&
3397 SI.getOrdering() != AtomicOrdering::AcquireRelease,
3398 "Store cannot have Acquire ordering", &SI);
3399 Assert(SI.getAlignment() != 0,
3400 "Atomic store must specify explicit alignment", &SI);
3401 Assert(ElTy->isIntOrPtrTy() || ElTy->isFloatingPointTy(),
3402 "atomic store operand must have integer, pointer, or floating point "
3403 "type!",
3404 ElTy, &SI);
3405 checkAtomicMemAccessSize(ElTy, &SI);
3406 } else {
3407 Assert(SI.getSyncScopeID() == SyncScope::System,
3408 "Non-atomic store cannot have SynchronizationScope specified", &SI);
3410 visitInstruction(SI);
3413 /// Check that SwiftErrorVal is used as a swifterror argument in \p Call.
3414 void Verifier::verifySwiftErrorCall(CallBase &Call,
3415 const Value *SwiftErrorVal) {
3416 unsigned Idx = 0;
3417 for (auto I = Call.arg_begin(), E = Call.arg_end(); I != E; ++I, ++Idx) {
3418 if (*I == SwiftErrorVal) {
3419 Assert(Call.paramHasAttr(Idx, Attribute::SwiftError),
3420 "swifterror value when used in a callsite should be marked "
3421 "with swifterror attribute",
3422 SwiftErrorVal, Call);
3427 void Verifier::verifySwiftErrorValue(const Value *SwiftErrorVal) {
3428 // Check that swifterror value is only used by loads, stores, or as
3429 // a swifterror argument.
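// For example, a swifterror alloca may only be used in IR shaped roughly like:
//   %err = alloca swifterror i8*
//   store i8* null, i8** %err
//   call void @use(i8** swifterror %err)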
3430 for (const User *U : SwiftErrorVal->users()) {
3431 Assert(isa<LoadInst>(U) || isa<StoreInst>(U) || isa<CallInst>(U) ||
3432 isa<InvokeInst>(U),
3433 "swifterror value can only be loaded and stored from, or "
3434 "as a swifterror argument!",
3435 SwiftErrorVal, U);
3436 // If it is used by a store, check it is the second operand.
3437 if (auto StoreI = dyn_cast<StoreInst>(U))
3438 Assert(StoreI->getOperand(1) == SwiftErrorVal,
3439 "swifterror value should be the second operand when used "
3440 "by stores", SwiftErrorVal, U);
3441 if (auto *Call = dyn_cast<CallBase>(U))
3442 verifySwiftErrorCall(*const_cast<CallBase *>(Call), SwiftErrorVal);
3446 void Verifier::visitAllocaInst(AllocaInst &AI) {
3447 SmallPtrSet<Type*, 4> Visited;
3448 PointerType *PTy = AI.getType();
3449 // TODO: Relax this restriction?
3450 Assert(PTy->getAddressSpace() == DL.getAllocaAddrSpace(),
3451 "Allocation instruction pointer not in the stack address space!",
3452 &AI);
3453 Assert(AI.getAllocatedType()->isSized(&Visited),
3454 "Cannot allocate unsized type", &AI);
3455 Assert(AI.getArraySize()->getType()->isIntegerTy(),
3456 "Alloca array size must have integer type", &AI);
3457 Assert(AI.getAlignment() <= Value::MaximumAlignment,
3458 "huge alignment values are unsupported", &AI);
3460 if (AI.isSwiftError()) {
3461 verifySwiftErrorValue(&AI);
3464 visitInstruction(AI);
3467 void Verifier::visitAtomicCmpXchgInst(AtomicCmpXchgInst &CXI) {
3469 // FIXME: more conditions???
3470 Assert(CXI.getSuccessOrdering() != AtomicOrdering::NotAtomic,
3471 "cmpxchg instructions must be atomic.", &CXI);
3472 Assert(CXI.getFailureOrdering() != AtomicOrdering::NotAtomic,
3473 "cmpxchg instructions must be atomic.", &CXI);
3474 Assert(CXI.getSuccessOrdering() != AtomicOrdering::Unordered,
3475 "cmpxchg instructions cannot be unordered.", &CXI);
3476 Assert(CXI.getFailureOrdering() != AtomicOrdering::Unordered,
3477 "cmpxchg instructions cannot be unordered.", &CXI);
3478 Assert(!isStrongerThan(CXI.getFailureOrdering(), CXI.getSuccessOrdering()),
3479 "cmpxchg instructions failure argument shall be no stronger than the "
3480 "success argument",
3481 &CXI);
3482 Assert(CXI.getFailureOrdering() != AtomicOrdering::Release &&
3483 CXI.getFailureOrdering() != AtomicOrdering::AcquireRelease,
3484 "cmpxchg failure ordering cannot include release semantics", &CXI);
3486 PointerType *PTy = dyn_cast<PointerType>(CXI.getOperand(0)->getType());
3487 Assert(PTy, "First cmpxchg operand must be a pointer.", &CXI);
3488 Type *ElTy = PTy->getElementType();
3489 Assert(ElTy->isIntOrPtrTy(),
3490 "cmpxchg operand must have integer or pointer type", ElTy, &CXI);
3491 checkAtomicMemAccessSize(ElTy, &CXI);
3492 Assert(ElTy == CXI.getOperand(1)->getType(),
3493 "Expected value type does not match pointer operand type!", &CXI,
3494 ElTy);
3495 Assert(ElTy == CXI.getOperand(2)->getType(),
3496 "Stored value type does not match pointer operand type!", &CXI, ElTy);
3497 visitInstruction(CXI);
3500 void Verifier::visitAtomicRMWInst(AtomicRMWInst &RMWI) {
3501 Assert(RMWI.getOrdering() != AtomicOrdering::NotAtomic,
3502 "atomicrmw instructions must be atomic.", &RMWI);
3503 Assert(RMWI.getOrdering() != AtomicOrdering::Unordered,
3504 "atomicrmw instructions cannot be unordered.", &RMWI);
3505 auto Op = RMWI.getOperation();
3506 PointerType *PTy = dyn_cast<PointerType>(RMWI.getOperand(0)->getType());
3507 Assert(PTy, "First atomicrmw operand must be a pointer.", &RMWI);
3508 Type *ElTy = PTy->getElementType();
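// For example, "atomicrmw xchg float* %p, float 1.0 seq_cst" is accepted
// (xchg permits floating-point operands), while "atomicrmw add float* %p,
// float 1.0 seq_cst" is rejected; fadd is the floating-point counterpart.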
3509 if (Op == AtomicRMWInst::Xchg) {
3510 Assert(ElTy->isIntegerTy() || ElTy->isFloatingPointTy(), "atomicrmw " +
3511 AtomicRMWInst::getOperationName(Op) +
3512 " operand must have integer or floating point type!",
3513 &RMWI, ElTy);
3514 } else if (AtomicRMWInst::isFPOperation(Op)) {
3515 Assert(ElTy->isFloatingPointTy(), "atomicrmw " +
3516 AtomicRMWInst::getOperationName(Op) +
3517 " operand must have floating point type!",
3518 &RMWI, ElTy);
3519 } else {
3520 Assert(ElTy->isIntegerTy(), "atomicrmw " +
3521 AtomicRMWInst::getOperationName(Op) +
3522 " operand must have integer type!",
3523 &RMWI, ElTy);
3525 checkAtomicMemAccessSize(ElTy, &RMWI);
3526 Assert(ElTy == RMWI.getOperand(1)->getType(),
3527 "Argument value type does not match pointer operand type!", &RMWI,
3528 ElTy);
3529 Assert(AtomicRMWInst::FIRST_BINOP <= Op && Op <= AtomicRMWInst::LAST_BINOP,
3530 "Invalid binary operation!", &RMWI);
3531 visitInstruction(RMWI);
3534 void Verifier::visitFenceInst(FenceInst &FI) {
3535 const AtomicOrdering Ordering = FI.getOrdering();
3536 Assert(Ordering == AtomicOrdering::Acquire ||
3537 Ordering == AtomicOrdering::Release ||
3538 Ordering == AtomicOrdering::AcquireRelease ||
3539 Ordering == AtomicOrdering::SequentiallyConsistent,
3540 "fence instructions may only have acquire, release, acq_rel, or "
3541 "seq_cst ordering.",
3542 &FI);
3543 visitInstruction(FI);
3546 void Verifier::visitExtractValueInst(ExtractValueInst &EVI) {
3547 Assert(ExtractValueInst::getIndexedType(EVI.getAggregateOperand()->getType(),
3548 EVI.getIndices()) == EVI.getType(),
3549 "Invalid ExtractValueInst operands!", &EVI);
3551 visitInstruction(EVI);
3554 void Verifier::visitInsertValueInst(InsertValueInst &IVI) {
3555 Assert(ExtractValueInst::getIndexedType(IVI.getAggregateOperand()->getType(),
3556 IVI.getIndices()) ==
3557 IVI.getOperand(1)->getType(),
3558 "Invalid InsertValueInst operands!", &IVI);
3560 visitInstruction(IVI);
3563 static Value *getParentPad(Value *EHPad) {
3564 if (auto *FPI = dyn_cast<FuncletPadInst>(EHPad))
3565 return FPI->getParentPad();
3567 return cast<CatchSwitchInst>(EHPad)->getParentPad();
3570 void Verifier::visitEHPadPredecessors(Instruction &I) {
3571 assert(I.isEHPad());
3573 BasicBlock *BB = I.getParent();
3574 Function *F = BB->getParent();
3576 Assert(BB != &F->getEntryBlock(), "EH pad cannot be in entry block.", &I);
3578 if (auto *LPI = dyn_cast<LandingPadInst>(&I)) {
3579 // The landingpad instruction defines its parent as a landing pad block. The
3580 // landing pad block may be branched to only by the unwind edge of an
3581 // invoke.
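// In other words, every predecessor must end in something like
//   invoke void @f() to label %normal unwind label %lpad
// where %lpad is the landing pad block.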
3582 for (BasicBlock *PredBB : predecessors(BB)) {
3583 const auto *II = dyn_cast<InvokeInst>(PredBB->getTerminator());
3584 Assert(II && II->getUnwindDest() == BB && II->getNormalDest() != BB,
3585 "Block containing LandingPadInst must be jumped to "
3586 "only by the unwind edge of an invoke.",
3587 LPI);
3589 return;
3591 if (auto *CPI = dyn_cast<CatchPadInst>(&I)) {
3592 if (!pred_empty(BB))
3593 Assert(BB->getUniquePredecessor() == CPI->getCatchSwitch()->getParent(),
3594 "Block containg CatchPadInst must be jumped to "
3595 "only by its catchswitch.",
3596 CPI);
3597 Assert(BB != CPI->getCatchSwitch()->getUnwindDest(),
3598 "Catchswitch cannot unwind to one of its catchpads",
3599 CPI->getCatchSwitch(), CPI);
3600 return;
3603 // Verify that each pred has a legal terminator with a legal to/from EH
3604 // pad relationship.
3605 Instruction *ToPad = &I;
3606 Value *ToPadParent = getParentPad(ToPad);
3607 for (BasicBlock *PredBB : predecessors(BB)) {
3608 Instruction *TI = PredBB->getTerminator();
3609 Value *FromPad;
3610 if (auto *II = dyn_cast<InvokeInst>(TI)) {
3611 Assert(II->getUnwindDest() == BB && II->getNormalDest() != BB,
3612 "EH pad must be jumped to via an unwind edge", ToPad, II);
3613 if (auto Bundle = II->getOperandBundle(LLVMContext::OB_funclet))
3614 FromPad = Bundle->Inputs[0];
3615 else
3616 FromPad = ConstantTokenNone::get(II->getContext());
3617 } else if (auto *CRI = dyn_cast<CleanupReturnInst>(TI)) {
3618 FromPad = CRI->getOperand(0);
3619 Assert(FromPad != ToPadParent, "A cleanupret must exit its cleanup", CRI);
3620 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(TI)) {
3621 FromPad = CSI;
3622 } else {
3623 Assert(false, "EH pad must be jumped to via an unwind edge", ToPad, TI);
3626 // The edge may exit from zero or more nested pads.
3627 SmallSet<Value *, 8> Seen;
3628 for (;; FromPad = getParentPad(FromPad)) {
3629 Assert(FromPad != ToPad,
3630 "EH pad cannot handle exceptions raised within it", FromPad, TI);
3631 if (FromPad == ToPadParent) {
3632 // This is a legal unwind edge.
3633 break;
3635 Assert(!isa<ConstantTokenNone>(FromPad),
3636 "A single unwind edge may only enter one EH pad", TI);
3637 Assert(Seen.insert(FromPad).second,
3638 "EH pad jumps through a cycle of pads", FromPad);
3643 void Verifier::visitLandingPadInst(LandingPadInst &LPI) {
3644 // The landingpad instruction is ill-formed if it doesn't have any clauses and
3645 // isn't a cleanup.
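// For example, each of the following forms is well-formed:
//   %lp = landingpad { i8*, i32 } cleanup
//   %lp = landingpad { i8*, i32 } catch i8* bitcast (i8** @_ZTIi to i8*)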
3646 Assert(LPI.getNumClauses() > 0 || LPI.isCleanup(),
3647 "LandingPadInst needs at least one clause or to be a cleanup.", &LPI);
3649 visitEHPadPredecessors(LPI);
3651 if (!LandingPadResultTy)
3652 LandingPadResultTy = LPI.getType();
3653 else
3654 Assert(LandingPadResultTy == LPI.getType(),
3655 "The landingpad instruction should have a consistent result type "
3656 "inside a function.",
3657 &LPI);
3659 Function *F = LPI.getParent()->getParent();
3660 Assert(F->hasPersonalityFn(),
3661 "LandingPadInst needs to be in a function with a personality.", &LPI);
3663 // The landingpad instruction must be the first non-PHI instruction in the
3664 // block.
3665 Assert(LPI.getParent()->getLandingPadInst() == &LPI,
3666 "LandingPadInst not the first non-PHI instruction in the block.",
3667 &LPI);
3669 for (unsigned i = 0, e = LPI.getNumClauses(); i < e; ++i) {
3670 Constant *Clause = LPI.getClause(i);
3671 if (LPI.isCatch(i)) {
3672 Assert(isa<PointerType>(Clause->getType()),
3673 "Catch operand does not have pointer type!", &LPI);
3674 } else {
3675 Assert(LPI.isFilter(i), "Clause is neither catch nor filter!", &LPI);
3676 Assert(isa<ConstantArray>(Clause) || isa<ConstantAggregateZero>(Clause),
3677 "Filter operand is not an array of constants!", &LPI);
3681 visitInstruction(LPI);
3684 void Verifier::visitResumeInst(ResumeInst &RI) {
3685 Assert(RI.getFunction()->hasPersonalityFn(),
3686 "ResumeInst needs to be in a function with a personality.", &RI);
3688 if (!LandingPadResultTy)
3689 LandingPadResultTy = RI.getValue()->getType();
3690 else
3691 Assert(LandingPadResultTy == RI.getValue()->getType(),
3692 "The resume instruction should have a consistent result type "
3693 "inside a function.",
3694 &RI);
3696 visitTerminator(RI);
3699 void Verifier::visitCatchPadInst(CatchPadInst &CPI) {
3700 BasicBlock *BB = CPI.getParent();
3702 Function *F = BB->getParent();
3703 Assert(F->hasPersonalityFn(),
3704 "CatchPadInst needs to be in a function with a personality.", &CPI);
3706 Assert(isa<CatchSwitchInst>(CPI.getParentPad()),
3707 "CatchPadInst needs to be directly nested in a CatchSwitchInst.",
3708 CPI.getParentPad());
3710 // The catchpad instruction must be the first non-PHI instruction in the
3711 // block.
3712 Assert(BB->getFirstNonPHI() == &CPI,
3713 "CatchPadInst not the first non-PHI instruction in the block.", &CPI);
3715 visitEHPadPredecessors(CPI);
3716 visitFuncletPadInst(CPI);
3719 void Verifier::visitCatchReturnInst(CatchReturnInst &CatchReturn) {
3720 Assert(isa<CatchPadInst>(CatchReturn.getOperand(0)),
3721 "CatchReturnInst needs to be provided a CatchPad", &CatchReturn,
3722 CatchReturn.getOperand(0));
3724 visitTerminator(CatchReturn);
3727 void Verifier::visitCleanupPadInst(CleanupPadInst &CPI) {
3728 BasicBlock *BB = CPI.getParent();
3730 Function *F = BB->getParent();
3731 Assert(F->hasPersonalityFn(),
3732 "CleanupPadInst needs to be in a function with a personality.", &CPI);
3734 // The cleanuppad instruction must be the first non-PHI instruction in the
3735 // block.
3736 Assert(BB->getFirstNonPHI() == &CPI,
3737 "CleanupPadInst not the first non-PHI instruction in the block.",
3738 &CPI);
3740 auto *ParentPad = CPI.getParentPad();
3741 Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
3742 "CleanupPadInst has an invalid parent.", &CPI);
3744 visitEHPadPredecessors(CPI);
3745 visitFuncletPadInst(CPI);
3748 void Verifier::visitFuncletPadInst(FuncletPadInst &FPI) {
3749 User *FirstUser = nullptr;
3750 Value *FirstUnwindPad = nullptr;
3751 SmallVector<FuncletPadInst *, 8> Worklist({&FPI});
3752 SmallSet<FuncletPadInst *, 8> Seen;
3754 while (!Worklist.empty()) {
3755 FuncletPadInst *CurrentPad = Worklist.pop_back_val();
3756 Assert(Seen.insert(CurrentPad).second,
3757 "FuncletPadInst must not be nested within itself", CurrentPad);
3758 Value *UnresolvedAncestorPad = nullptr;
3759 for (User *U : CurrentPad->users()) {
3760 BasicBlock *UnwindDest;
3761 if (auto *CRI = dyn_cast<CleanupReturnInst>(U)) {
3762 UnwindDest = CRI->getUnwindDest();
3763 } else if (auto *CSI = dyn_cast<CatchSwitchInst>(U)) {
3764 // We allow catchswitch unwind to caller to nest
3765 // within an outer pad that unwinds somewhere else,
3766 // because catchswitch doesn't have a nounwind variant.
3767 // See e.g. SimplifyCFGOpt::SimplifyUnreachable.
3768 if (CSI->unwindsToCaller())
3769 continue;
3770 UnwindDest = CSI->getUnwindDest();
3771 } else if (auto *II = dyn_cast<InvokeInst>(U)) {
3772 UnwindDest = II->getUnwindDest();
3773 } else if (isa<CallInst>(U)) {
3774 // Calls which don't unwind may be found inside funclet
3775 // pads that unwind somewhere else. We don't *require*
3776 // such calls to be annotated nounwind.
3777 continue;
3778 } else if (auto *CPI = dyn_cast<CleanupPadInst>(U)) {
3779 // The unwind dest for a cleanup can only be found by
3780 // recursive search. Add it to the worklist, and we'll
3781 // search for its first use that determines where it unwinds.
3782 Worklist.push_back(CPI);
3783 continue;
3784 } else {
3785 Assert(isa<CatchReturnInst>(U), "Bogus funclet pad use", U);
3786 continue;
3789 Value *UnwindPad;
3790 bool ExitsFPI;
3791 if (UnwindDest) {
3792 UnwindPad = UnwindDest->getFirstNonPHI();
3793 if (!cast<Instruction>(UnwindPad)->isEHPad())
3794 continue;
3795 Value *UnwindParent = getParentPad(UnwindPad);
3796 // Ignore unwind edges that don't exit CurrentPad.
3797 if (UnwindParent == CurrentPad)
3798 continue;
3799 // Determine whether the original funclet pad is exited,
3800 // and if we are scanning nested pads determine how many
3801 // of them are exited so we can stop searching their
3802 // children.
3803 Value *ExitedPad = CurrentPad;
3804 ExitsFPI = false;
3805 do {
3806 if (ExitedPad == &FPI) {
3807 ExitsFPI = true;
3808 // Now we can resolve any ancestors of CurrentPad up to
3809 // FPI, but not including FPI since we need to make sure
3810 // to check all direct users of FPI for consistency.
3811 UnresolvedAncestorPad = &FPI;
3812 break;
3814 Value *ExitedParent = getParentPad(ExitedPad);
3815 if (ExitedParent == UnwindParent) {
3816 // ExitedPad is the ancestor-most pad which this unwind
3817 // edge exits, so we can resolve up to it, meaning that
3818 // ExitedParent is the first ancestor still unresolved.
3819 UnresolvedAncestorPad = ExitedParent;
3820 break;
3822 ExitedPad = ExitedParent;
3823 } while (!isa<ConstantTokenNone>(ExitedPad));
3824 } else {
3825 // Unwinding to caller exits all pads.
3826 UnwindPad = ConstantTokenNone::get(FPI.getContext());
3827 ExitsFPI = true;
3828 UnresolvedAncestorPad = &FPI;
3831 if (ExitsFPI) {
3832 // This unwind edge exits FPI. Make sure it agrees with other
3833 // such edges.
3834 if (FirstUser) {
3835 Assert(UnwindPad == FirstUnwindPad, "Unwind edges out of a funclet "
3836 "pad must have the same unwind "
3837 "dest",
3838 &FPI, U, FirstUser);
3839 } else {
3840 FirstUser = U;
3841 FirstUnwindPad = UnwindPad;
3842 // Record cleanup sibling unwinds for verifySiblingFuncletUnwinds
3843 if (isa<CleanupPadInst>(&FPI) && !isa<ConstantTokenNone>(UnwindPad) &&
3844 getParentPad(UnwindPad) == getParentPad(&FPI))
3845 SiblingFuncletInfo[&FPI] = cast<Instruction>(U);
3848 // Make sure we visit all uses of FPI, but for nested pads stop as
3849 // soon as we know where they unwind to.
3850 if (CurrentPad != &FPI)
3851 break;
3853 if (UnresolvedAncestorPad) {
3854 if (CurrentPad == UnresolvedAncestorPad) {
3855 // When CurrentPad is FPI itself, we don't mark it as resolved even if
3856 // we've found an unwind edge that exits it, because we need to verify
3857 // all direct uses of FPI.
3858 assert(CurrentPad == &FPI);
3859 continue;
3861 // Pop off the worklist any nested pads that we've found an unwind
3862 // destination for. The pads on the worklist are the uncles,
3863 // great-uncles, etc. of CurrentPad. We've found an unwind destination
3864 // for all ancestors of CurrentPad up to but not including
3865 // UnresolvedAncestorPad.
3866 Value *ResolvedPad = CurrentPad;
3867 while (!Worklist.empty()) {
3868 Value *UnclePad = Worklist.back();
3869 Value *AncestorPad = getParentPad(UnclePad);
3870 // Walk ResolvedPad up the ancestor list until we either find the
3871 // uncle's parent or the last resolved ancestor.
3872 while (ResolvedPad != AncestorPad) {
3873 Value *ResolvedParent = getParentPad(ResolvedPad);
3874 if (ResolvedParent == UnresolvedAncestorPad) {
3875 break;
3877 ResolvedPad = ResolvedParent;
3879 // If the resolved ancestor search didn't find the uncle's parent,
3880 // then the uncle is not yet resolved.
3881 if (ResolvedPad != AncestorPad)
3882 break;
3883 // This uncle is resolved, so pop it from the worklist.
3884 Worklist.pop_back();
3889 if (FirstUnwindPad) {
3890 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(FPI.getParentPad())) {
3891 BasicBlock *SwitchUnwindDest = CatchSwitch->getUnwindDest();
3892 Value *SwitchUnwindPad;
3893 if (SwitchUnwindDest)
3894 SwitchUnwindPad = SwitchUnwindDest->getFirstNonPHI();
3895 else
3896 SwitchUnwindPad = ConstantTokenNone::get(FPI.getContext());
3897 Assert(SwitchUnwindPad == FirstUnwindPad,
3898 "Unwind edges out of a catch must have the same unwind dest as "
3899 "the parent catchswitch",
3900 &FPI, FirstUser, CatchSwitch);
3904 visitInstruction(FPI);
3907 void Verifier::visitCatchSwitchInst(CatchSwitchInst &CatchSwitch) {
3908 BasicBlock *BB = CatchSwitch.getParent();
3910 Function *F = BB->getParent();
3911 Assert(F->hasPersonalityFn(),
3912 "CatchSwitchInst needs to be in a function with a personality.",
3913 &CatchSwitch);
3915 // The catchswitch instruction must be the first non-PHI instruction in the
3916 // block.
3917 Assert(BB->getFirstNonPHI() == &CatchSwitch,
3918 "CatchSwitchInst not the first non-PHI instruction in the block.",
3919 &CatchSwitch);
3921 auto *ParentPad = CatchSwitch.getParentPad();
3922 Assert(isa<ConstantTokenNone>(ParentPad) || isa<FuncletPadInst>(ParentPad),
3923 "CatchSwitchInst has an invalid parent.", ParentPad);
3925 if (BasicBlock *UnwindDest = CatchSwitch.getUnwindDest()) {
3926 Instruction *I = UnwindDest->getFirstNonPHI();
3927 Assert(I->isEHPad() && !isa<LandingPadInst>(I),
3928 "CatchSwitchInst must unwind to an EH block which is not a "
3929 "landingpad.",
3930 &CatchSwitch);
3932 // Record catchswitch sibling unwinds for verifySiblingFuncletUnwinds
3933 if (getParentPad(I) == ParentPad)
3934 SiblingFuncletInfo[&CatchSwitch] = &CatchSwitch;
3937 Assert(CatchSwitch.getNumHandlers() != 0,
3938 "CatchSwitchInst cannot have empty handler list", &CatchSwitch);
3940 for (BasicBlock *Handler : CatchSwitch.handlers()) {
3941 Assert(isa<CatchPadInst>(Handler->getFirstNonPHI()),
3942 "CatchSwitchInst handlers must be catchpads", &CatchSwitch, Handler);
3945 visitEHPadPredecessors(CatchSwitch);
3946 visitTerminator(CatchSwitch);
3949 void Verifier::visitCleanupReturnInst(CleanupReturnInst &CRI) {
3950 Assert(isa<CleanupPadInst>(CRI.getOperand(0)),
3951 "CleanupReturnInst needs to be provided a CleanupPad", &CRI,
3952 CRI.getOperand(0));
3954 if (BasicBlock *UnwindDest = CRI.getUnwindDest()) {
3955 Instruction *I = UnwindDest->getFirstNonPHI();
3956 Assert(I->isEHPad() && !isa<LandingPadInst>(I),
3957 "CleanupReturnInst must unwind to an EH block which is not a "
3958 "landingpad.",
3959 &CRI);
3962 visitTerminator(CRI);
3965 void Verifier::verifyDominatesUse(Instruction &I, unsigned i) {
3966 Instruction *Op = cast<Instruction>(I.getOperand(i));
3967 // If we have an invalid invoke, don't try to compute the dominance.
3968 // We already reject it in the invoke specific checks and the dominance
3969 // computation doesn't handle multiple edges.
3970 if (InvokeInst *II = dyn_cast<InvokeInst>(Op)) {
3971 if (II->getNormalDest() == II->getUnwindDest())
3972 return;
3975 // Quick check whether the def has already been encountered in the same block.
3976 // PHI nodes are not checked to prevent accepting preceding PHIs, because PHI
3977 // uses are defined to happen on the incoming edge, not at the instruction.
3979 // FIXME: If this operand is a MetadataAsValue (wrapping a LocalAsMetadata)
3980 // wrapping an SSA value, assert that we've already encountered it. See
3981 // related FIXME in Mapper::mapLocalAsMetadata in ValueMapper.cpp.
3982 if (!isa<PHINode>(I) && InstsInThisBlock.count(Op))
3983 return;
3985 const Use &U = I.getOperandUse(i);
3986 Assert(DT.dominates(Op, U),
3987 "Instruction does not dominate all uses!", Op, &I);
3990 void Verifier::visitDereferenceableMetadata(Instruction& I, MDNode* MD) {
3991 Assert(I.getType()->isPointerTy(), "dereferenceable, dereferenceable_or_null "
3992 "apply only to pointer types", &I);
3993 Assert((isa<LoadInst>(I) || isa<IntToPtrInst>(I)),
3994 "dereferenceable, dereferenceable_or_null apply only to load"
3995 " and inttoptr instructions, use attributes for calls or invokes", &I);
3996 Assert(MD->getNumOperands() == 1, "dereferenceable, dereferenceable_or_null "
3997 "take one operand!", &I);
3998 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(MD->getOperand(0));
3999 Assert(CI && CI->getType()->isIntegerTy(64), "dereferenceable, "
4000 "dereferenceable_or_null metadata value must be an i64!", &I);
4003 void Verifier::visitProfMetadata(Instruction &I, MDNode *MD) {
4004 Assert(MD->getNumOperands() >= 2,
4005 "!prof annotations should have no less than 2 operands", MD);
4007 // Check first operand.
4008 Assert(MD->getOperand(0) != nullptr, "first operand should not be null", MD);
4009 Assert(isa<MDString>(MD->getOperand(0)),
4010 "expected string with name of the !prof annotation", MD);
4011 MDString *MDS = cast<MDString>(MD->getOperand(0));
4012 StringRef ProfName = MDS->getString();
4014 // Check consistency of !prof branch_weights metadata.
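// For example, a two-way branch carries one weight per successor:
//   br i1 %cond, label %taken, label %not_taken, !prof !0
//   !0 = !{!"branch_weights", i32 20, i32 1}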
4015 if (ProfName.equals("branch_weights")) {
4016 unsigned ExpectedNumOperands = 0;
4017 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
4018 ExpectedNumOperands = BI->getNumSuccessors();
4019 else if (SwitchInst *SI = dyn_cast<SwitchInst>(&I))
4020 ExpectedNumOperands = SI->getNumSuccessors();
4021 else if (isa<CallInst>(&I) || isa<InvokeInst>(&I))
4022 ExpectedNumOperands = 1;
4023 else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(&I))
4024 ExpectedNumOperands = IBI->getNumDestinations();
4025 else if (isa<SelectInst>(&I))
4026 ExpectedNumOperands = 2;
4027 else
4028 CheckFailed("!prof branch_weights are not allowed for this instruction",
4029 MD);
4031 Assert(MD->getNumOperands() == 1 + ExpectedNumOperands,
4032 "Wrong number of operands", MD);
4033 for (unsigned i = 1; i < MD->getNumOperands(); ++i) {
4034 auto &MDO = MD->getOperand(i);
4035 Assert(MDO, "second operand should not be null", MD);
4036 Assert(mdconst::dyn_extract<ConstantInt>(MDO),
4037 "!prof brunch_weights operand is not a const int");
4042 /// verifyInstruction - Verify that an instruction is well formed.
4044 void Verifier::visitInstruction(Instruction &I) {
4045 BasicBlock *BB = I.getParent();
4046 Assert(BB, "Instruction not embedded in basic block!", &I);
4048 if (!isa<PHINode>(I)) { // Check that non-phi nodes are not self referential
4049 for (User *U : I.users()) {
4050 Assert(U != (User *)&I || !DT.isReachableFromEntry(BB),
4051 "Only PHI nodes may reference their own value!", &I);
4055 // Check that void typed values don't have names
4056 Assert(!I.getType()->isVoidTy() || !I.hasName(),
4057 "Instruction has a name, but provides a void value!", &I);
4059 // Check that the return value of the instruction is either void or a legal
4060 // value type.
4061 Assert(I.getType()->isVoidTy() || I.getType()->isFirstClassType(),
4062 "Instruction returns a non-scalar type!", &I);
4064 // Check that the instruction doesn't produce metadata. Calls are already
4065 // checked against the callee type.
4066 Assert(!I.getType()->isMetadataTy() || isa<CallInst>(I) || isa<InvokeInst>(I),
4067 "Invalid use of metadata!", &I);
4069 // Check that all uses of the instruction, if they are instructions
4070 // themselves, actually have parent basic blocks. If the use is not an
4071 // instruction, it is an error!
4072 for (Use &U : I.uses()) {
4073 if (Instruction *Used = dyn_cast<Instruction>(U.getUser()))
4074 Assert(Used->getParent() != nullptr,
4075 "Instruction referencing"
4076 " instruction not embedded in a basic block!",
4077 &I, Used);
4078 else {
4079 CheckFailed("Use of instruction is not an instruction!", U);
4080 return;
4084 // Get a pointer to the call base of the instruction if it is some form of
4085 // call.
4086 const CallBase *CBI = dyn_cast<CallBase>(&I);
4088 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
4089 Assert(I.getOperand(i) != nullptr, "Instruction has null operand!", &I);
4091 // Check to make sure that only first-class-values are operands to
4092 // instructions.
4093 if (!I.getOperand(i)->getType()->isFirstClassType()) {
4094 Assert(false, "Instruction operands must be first-class values!", &I);
4097 if (Function *F = dyn_cast<Function>(I.getOperand(i))) {
4098 // Check to make sure that the "address of" an intrinsic function is never
4099 // taken.
4100 Assert(!F->isIntrinsic() ||
4101 (CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i)),
4102 "Cannot take the address of an intrinsic!", &I);
4103 Assert(
4104 !F->isIntrinsic() || isa<CallInst>(I) ||
4105 F->getIntrinsicID() == Intrinsic::donothing ||
4106 F->getIntrinsicID() == Intrinsic::coro_resume ||
4107 F->getIntrinsicID() == Intrinsic::coro_destroy ||
4108 F->getIntrinsicID() == Intrinsic::experimental_patchpoint_void ||
4109 F->getIntrinsicID() == Intrinsic::experimental_patchpoint_i64 ||
4110 F->getIntrinsicID() == Intrinsic::experimental_gc_statepoint ||
4111 F->getIntrinsicID() == Intrinsic::wasm_rethrow_in_catch,
4112 "Cannot invoke an intrinsic other than donothing, patchpoint, "
4113 "statepoint, coro_resume or coro_destroy",
4114 &I);
4115 Assert(F->getParent() == &M, "Referencing function in another module!",
4116 &I, &M, F, F->getParent());
4117 } else if (BasicBlock *OpBB = dyn_cast<BasicBlock>(I.getOperand(i))) {
4118 Assert(OpBB->getParent() == BB->getParent(),
4119 "Referring to a basic block in another function!", &I);
4120 } else if (Argument *OpArg = dyn_cast<Argument>(I.getOperand(i))) {
4121 Assert(OpArg->getParent() == BB->getParent(),
4122 "Referring to an argument in another function!", &I);
4123 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(I.getOperand(i))) {
4124 Assert(GV->getParent() == &M, "Referencing global in another module!", &I,
4125 &M, GV, GV->getParent());
4126 } else if (isa<Instruction>(I.getOperand(i))) {
4127 verifyDominatesUse(I, i);
4128 } else if (isa<InlineAsm>(I.getOperand(i))) {
4129 Assert(CBI && &CBI->getCalledOperandUse() == &I.getOperandUse(i),
4130 "Cannot take the address of an inline asm!", &I);
4131 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(I.getOperand(i))) {
4132 if (CE->getType()->isPtrOrPtrVectorTy() ||
4133 !DL.getNonIntegralAddressSpaces().empty()) {
4134 // If we have a ConstantExpr pointer, we need to see if it came from an
4135 // illegal bitcast. If the datalayout string specifies non-integral
4136 // address spaces then we also need to check for illegal ptrtoint and
4137 // inttoptr expressions.
4138 visitConstantExprsRecursively(CE);
4143 if (MDNode *MD = I.getMetadata(LLVMContext::MD_fpmath)) {
4144 Assert(I.getType()->isFPOrFPVectorTy(),
4145 "fpmath requires a floating point result!", &I);
4146 Assert(MD->getNumOperands() == 1, "fpmath takes one operand!", &I);
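// For example:
//   %r = fdiv float %a, %b, !fpmath !0
//   !0 = !{float 2.5}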
4147 if (ConstantFP *CFP0 =
4148 mdconst::dyn_extract_or_null<ConstantFP>(MD->getOperand(0))) {
4149 const APFloat &Accuracy = CFP0->getValueAPF();
4150 Assert(&Accuracy.getSemantics() == &APFloat::IEEEsingle(),
4151 "fpmath accuracy must have float type", &I);
4152 Assert(Accuracy.isFiniteNonZero() && !Accuracy.isNegative(),
4153 "fpmath accuracy not a positive number!", &I);
4154 } else {
4155 Assert(false, "invalid fpmath accuracy!", &I);
4159 if (MDNode *Range = I.getMetadata(LLVMContext::MD_range)) {
4160 Assert(isa<LoadInst>(I) || isa<CallInst>(I) || isa<InvokeInst>(I),
4161 "Ranges are only for loads, calls and invokes!", &I);
4162 visitRangeMetadata(I, Range, I.getType());
4165 if (I.getMetadata(LLVMContext::MD_nonnull)) {
4166 Assert(I.getType()->isPointerTy(), "nonnull applies only to pointer types",
4167 &I);
4168 Assert(isa<LoadInst>(I),
4169 "nonnull applies only to load instructions, use attributes"
4170 " for calls or invokes",
4171 &I);
4174 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable))
4175 visitDereferenceableMetadata(I, MD);
4177 if (MDNode *MD = I.getMetadata(LLVMContext::MD_dereferenceable_or_null))
4178 visitDereferenceableMetadata(I, MD);
4180 if (MDNode *TBAA = I.getMetadata(LLVMContext::MD_tbaa))
4181 TBAAVerifyHelper.visitTBAAMetadata(I, TBAA);
4183 if (MDNode *AlignMD = I.getMetadata(LLVMContext::MD_align)) {
4184 Assert(I.getType()->isPointerTy(), "align applies only to pointer types",
4185 &I);
4186 Assert(isa<LoadInst>(I), "align applies only to load instructions, "
4187 "use attributes for calls or invokes", &I);
4188 Assert(AlignMD->getNumOperands() == 1, "align takes one operand!", &I);
4189 ConstantInt *CI = mdconst::dyn_extract<ConstantInt>(AlignMD->getOperand(0));
4190 Assert(CI && CI->getType()->isIntegerTy(64),
4191 "align metadata value must be an i64!", &I);
4192 uint64_t Align = CI->getZExtValue();
4193 Assert(isPowerOf2_64(Align),
4194 "align metadata value must be a power of 2!", &I);
4195 Assert(Align <= Value::MaximumAlignment,
4196 "alignment is larger that implementation defined limit", &I);
4199 if (MDNode *MD = I.getMetadata(LLVMContext::MD_prof))
4200 visitProfMetadata(I, MD);
4202 if (MDNode *N = I.getDebugLoc().getAsMDNode()) {
4203 AssertDI(isa<DILocation>(N), "invalid !dbg metadata attachment", &I, N);
4204 visitMDNode(*N);
4207 if (auto *DII = dyn_cast<DbgVariableIntrinsic>(&I))
4208 verifyFragmentExpression(*DII);
4210 InstsInThisBlock.insert(&I);
4213 /// Allow intrinsics to be verified in different ways.
4214 void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
4215 Function *IF = Call.getCalledFunction();
4216 Assert(IF->isDeclaration(), "Intrinsic functions should never be defined!",
4217 IF);
4219 // Verify that the intrinsic prototype lines up with what the .td files
4220 // describe.
4221 FunctionType *IFTy = IF->getFunctionType();
4222 bool IsVarArg = IFTy->isVarArg();
4224 SmallVector<Intrinsic::IITDescriptor, 8> Table;
4225 getIntrinsicInfoTableEntries(ID, Table);
4226 ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
4228 // Walk the descriptors to extract overloaded types.
4229 SmallVector<Type *, 4> ArgTys;
4230 Intrinsic::MatchIntrinsicTypesResult Res =
4231 Intrinsic::matchIntrinsicSignature(IFTy, TableRef, ArgTys);
4232 Assert(Res != Intrinsic::MatchIntrinsicTypes_NoMatchRet,
4233 "Intrinsic has incorrect return type!", IF);
4234 Assert(Res != Intrinsic::MatchIntrinsicTypes_NoMatchArg,
4235 "Intrinsic has incorrect argument type!", IF);
4237 // Verify if the intrinsic call matches the vararg property.
4238 if (IsVarArg)
4239 Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
4240 "Intrinsic was not defined with variable arguments!", IF);
4241 else
4242 Assert(!Intrinsic::matchIntrinsicVarArg(IsVarArg, TableRef),
4243 "Callsite was not defined with variable arguments!", IF);
4245 // All descriptors should be absorbed by now.
4246 Assert(TableRef.empty(), "Intrinsic has too few arguments!", IF);
4248 // Now that we have the intrinsic ID and the actual argument types (and we
4249 // know they are legal for the intrinsic!) get the intrinsic name through the
4250 // usual means. This allows us to verify the mangling of argument types into
4251 // the name.
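// For example, an overload of llvm.memcpy on (i8*, i8*, i64) must be named
// llvm.memcpy.p0i8.p0i8.i64.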
4252 const std::string ExpectedName = Intrinsic::getName(ID, ArgTys);
4253 Assert(ExpectedName == IF->getName(),
4254 "Intrinsic name not mangled correctly for type arguments! "
4255 "Should be: " +
4256 ExpectedName,
4257 IF);
4259 // If the intrinsic takes MDNode arguments, verify that they are either global
4260 // or are local to *this* function.
4261 for (Value *V : Call.args())
4262 if (auto *MD = dyn_cast<MetadataAsValue>(V))
4263 visitMetadataAsValue(*MD, Call.getCaller());
4265 switch (ID) {
4266 default:
4267 break;
4268 case Intrinsic::coro_id: {
4269 auto *InfoArg = Call.getArgOperand(3)->stripPointerCasts();
4270 if (isa<ConstantPointerNull>(InfoArg))
4271 break;
4272 auto *GV = dyn_cast<GlobalVariable>(InfoArg);
4273 Assert(GV && GV->isConstant() && GV->hasDefinitiveInitializer(),
4274 "info argument of llvm.coro.begin must refer to an initialized "
4275 "constant");
4276 Constant *Init = GV->getInitializer();
4277 Assert(isa<ConstantStruct>(Init) || isa<ConstantArray>(Init),
4278 "info argument of llvm.coro.begin must refer to either a struct or "
4279 "an array");
4280 break;
4282 case Intrinsic::experimental_constrained_fadd:
4283 case Intrinsic::experimental_constrained_fsub:
4284 case Intrinsic::experimental_constrained_fmul:
4285 case Intrinsic::experimental_constrained_fdiv:
4286 case Intrinsic::experimental_constrained_frem:
4287 case Intrinsic::experimental_constrained_fma:
4288 case Intrinsic::experimental_constrained_fptosi:
4289 case Intrinsic::experimental_constrained_fptoui:
4290 case Intrinsic::experimental_constrained_fptrunc:
4291 case Intrinsic::experimental_constrained_fpext:
4292 case Intrinsic::experimental_constrained_sqrt:
4293 case Intrinsic::experimental_constrained_pow:
4294 case Intrinsic::experimental_constrained_powi:
4295 case Intrinsic::experimental_constrained_sin:
4296 case Intrinsic::experimental_constrained_cos:
4297 case Intrinsic::experimental_constrained_exp:
4298 case Intrinsic::experimental_constrained_exp2:
4299 case Intrinsic::experimental_constrained_log:
4300 case Intrinsic::experimental_constrained_log10:
4301 case Intrinsic::experimental_constrained_log2:
4302 case Intrinsic::experimental_constrained_rint:
4303 case Intrinsic::experimental_constrained_nearbyint:
4304 case Intrinsic::experimental_constrained_maxnum:
4305 case Intrinsic::experimental_constrained_minnum:
4306 case Intrinsic::experimental_constrained_ceil:
4307 case Intrinsic::experimental_constrained_floor:
4308 case Intrinsic::experimental_constrained_round:
4309 case Intrinsic::experimental_constrained_trunc:
4310 visitConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(Call));
4311 break;
4312 case Intrinsic::dbg_declare: // llvm.dbg.declare
4313 Assert(isa<MetadataAsValue>(Call.getArgOperand(0)),
4314 "invalid llvm.dbg.declare intrinsic call 1", Call);
4315 visitDbgIntrinsic("declare", cast<DbgVariableIntrinsic>(Call));
4316 break;
4317 case Intrinsic::dbg_addr: // llvm.dbg.addr
4318 visitDbgIntrinsic("addr", cast<DbgVariableIntrinsic>(Call));
4319 break;
4320 case Intrinsic::dbg_value: // llvm.dbg.value
4321 visitDbgIntrinsic("value", cast<DbgVariableIntrinsic>(Call));
4322 break;
4323 case Intrinsic::dbg_label: // llvm.dbg.label
4324 visitDbgLabelIntrinsic("label", cast<DbgLabelInst>(Call));
4325 break;
4326 case Intrinsic::memcpy:
4327 case Intrinsic::memmove:
4328 case Intrinsic::memset: {
4329 const auto *MI = cast<MemIntrinsic>(&Call);
4330 auto IsValidAlignment = [&](unsigned Alignment) -> bool {
4331 return Alignment == 0 || isPowerOf2_32(Alignment);
4333 Assert(IsValidAlignment(MI->getDestAlignment()),
4334 "alignment of arg 0 of memory intrinsic must be 0 or a power of 2",
4335 Call);
4336 if (const auto *MTI = dyn_cast<MemTransferInst>(MI)) {
4337 Assert(IsValidAlignment(MTI->getSourceAlignment()),
4338 "alignment of arg 1 of memory intrinsic must be 0 or a power of 2",
4339 Call);
4342 break;
4344 case Intrinsic::memcpy_element_unordered_atomic:
4345 case Intrinsic::memmove_element_unordered_atomic:
4346 case Intrinsic::memset_element_unordered_atomic: {
4347 const auto *AMI = cast<AtomicMemIntrinsic>(&Call);
4349 ConstantInt *ElementSizeCI =
4350 cast<ConstantInt>(AMI->getRawElementSizeInBytes());
4351 const APInt &ElementSizeVal = ElementSizeCI->getValue();
4352 Assert(ElementSizeVal.isPowerOf2(),
4353 "element size of the element-wise atomic memory intrinsic "
4354 "must be a power of 2",
4355 Call);
4357 if (auto *LengthCI = dyn_cast<ConstantInt>(AMI->getLength())) {
4358 uint64_t Length = LengthCI->getZExtValue();
4359 uint64_t ElementSize = AMI->getElementSizeInBytes();
4360 Assert((Length % ElementSize) == 0,
4361 "constant length must be a multiple of the element size in the "
4362 "element-wise atomic memory intrinsic",
4363 Call);
4366 auto IsValidAlignment = [&](uint64_t Alignment) {
4367 return isPowerOf2_64(Alignment) && ElementSizeVal.ule(Alignment);
4369 uint64_t DstAlignment = AMI->getDestAlignment();
4370 Assert(IsValidAlignment(DstAlignment),
4371 "incorrect alignment of the destination argument", Call);
4372 if (const auto *AMT = dyn_cast<AtomicMemTransferInst>(AMI)) {
4373 uint64_t SrcAlignment = AMT->getSourceAlignment();
4374 Assert(IsValidAlignment(SrcAlignment),
4375 "incorrect alignment of the source argument", Call);
4377 break;
4379 case Intrinsic::gcroot:
4380 case Intrinsic::gcwrite:
4381 case Intrinsic::gcread:
4382 if (ID == Intrinsic::gcroot) {
4383 AllocaInst *AI =
4384 dyn_cast<AllocaInst>(Call.getArgOperand(0)->stripPointerCasts());
4385 Assert(AI, "llvm.gcroot parameter #1 must be an alloca.", Call);
4386 Assert(isa<Constant>(Call.getArgOperand(1)),
4387 "llvm.gcroot parameter #2 must be a constant.", Call);
4388 if (!AI->getAllocatedType()->isPointerTy()) {
4389 Assert(!isa<ConstantPointerNull>(Call.getArgOperand(1)),
4390 "llvm.gcroot parameter #1 must either be a pointer alloca, "
4391 "or argument #2 must be a non-null constant.",
4392 Call);
4396 Assert(Call.getParent()->getParent()->hasGC(),
4397 "Enclosing function does not use GC.", Call);
4398 break;
4399 case Intrinsic::init_trampoline:
4400 Assert(isa<Function>(Call.getArgOperand(1)->stripPointerCasts()),
4401 "llvm.init_trampoline parameter #2 must resolve to a function.",
4402 Call);
4403 break;
4404 case Intrinsic::prefetch:
4405 Assert(cast<ConstantInt>(Call.getArgOperand(1))->getZExtValue() < 2 &&
4406 cast<ConstantInt>(Call.getArgOperand(2))->getZExtValue() < 4,
4407 "invalid arguments to llvm.prefetch", Call);
4408 break;
4409 case Intrinsic::stackprotector:
4410 Assert(isa<AllocaInst>(Call.getArgOperand(1)->stripPointerCasts()),
4411 "llvm.stackprotector parameter #2 must resolve to an alloca.", Call);
4412 break;
4413 case Intrinsic::localescape: {
4414 BasicBlock *BB = Call.getParent();
4415 Assert(BB == &BB->getParent()->front(),
4416 "llvm.localescape used outside of entry block", Call);
4417 Assert(!SawFrameEscape,
4418 "multiple calls to llvm.localescape in one function", Call);
4419 for (Value *Arg : Call.args()) {
4420 if (isa<ConstantPointerNull>(Arg))
4421 continue; // Null values are allowed as placeholders.
4422 auto *AI = dyn_cast<AllocaInst>(Arg->stripPointerCasts());
4423 Assert(AI && AI->isStaticAlloca(),
4424 "llvm.localescape only accepts static allocas", Call);
4426 FrameEscapeInfo[BB->getParent()].first = Call.getNumArgOperands();
4427 SawFrameEscape = true;
4428 break;
4430 case Intrinsic::localrecover: {
4431 Value *FnArg = Call.getArgOperand(0)->stripPointerCasts();
4432 Function *Fn = dyn_cast<Function>(FnArg);
4433 Assert(Fn && !Fn->isDeclaration(),
4434 "llvm.localrecover first "
4435 "argument must be function defined in this module",
4436 Call);
4437 auto *IdxArg = cast<ConstantInt>(Call.getArgOperand(2));
4438 auto &Entry = FrameEscapeInfo[Fn];
4439 Entry.second = unsigned(
4440 std::max(uint64_t(Entry.second), IdxArg->getLimitedValue(~0U) + 1));
4441 break;
4444 case Intrinsic::experimental_gc_statepoint:
4445 if (auto *CI = dyn_cast<CallInst>(&Call))
4446 Assert(!CI->isInlineAsm(),
4447 "gc.statepoint support for inline assembly unimplemented", CI);
4448 Assert(Call.getParent()->getParent()->hasGC(),
4449 "Enclosing function does not use GC.", Call);
4451 verifyStatepoint(Call);
4452 break;
4453 case Intrinsic::experimental_gc_result: {
4454 Assert(Call.getParent()->getParent()->hasGC(),
4455 "Enclosing function does not use GC.", Call);
4456 // Are we tied to a statepoint properly?
4457 const auto *StatepointCall = dyn_cast<CallBase>(Call.getArgOperand(0));
4458 const Function *StatepointFn =
4459 StatepointCall ? StatepointCall->getCalledFunction() : nullptr;
4460 Assert(StatepointFn && StatepointFn->isDeclaration() &&
4461 StatepointFn->getIntrinsicID() ==
4462 Intrinsic::experimental_gc_statepoint,
4463 "gc.result operand #1 must be from a statepoint", Call,
4464 Call.getArgOperand(0));
4466 // Assert that result type matches wrapped callee.
4467 const Value *Target = StatepointCall->getArgOperand(2);
4468 auto *PT = cast<PointerType>(Target->getType());
4469 auto *TargetFuncType = cast<FunctionType>(PT->getElementType());
4470 Assert(Call.getType() == TargetFuncType->getReturnType(),
4471 "gc.result result type does not match wrapped callee", Call);
4472 break;
4474 case Intrinsic::experimental_gc_relocate: {
4475 Assert(Call.getNumArgOperands() == 3, "wrong number of arguments", Call);
4477 Assert(isa<PointerType>(Call.getType()->getScalarType()),
4478 "gc.relocate must return a pointer or a vector of pointers", Call);
4480 // Check that this relocate is correctly tied to the statepoint
4482 // This is case for relocate on the unwinding path of an invoke statepoint
4483 if (LandingPadInst *LandingPad =
4484 dyn_cast<LandingPadInst>(Call.getArgOperand(0))) {
4486 const BasicBlock *InvokeBB =
4487 LandingPad->getParent()->getUniquePredecessor();
4489 // Landingpad relocates should have only one predecessor with invoke
4490 // statepoint terminator
4491 Assert(InvokeBB, "safepoints should have unique landingpads",
4492 LandingPad->getParent());
4493 Assert(InvokeBB->getTerminator(), "safepoint block should be well formed",
4494 InvokeBB);
4495 Assert(isStatepoint(InvokeBB->getTerminator()),
4496 "gc relocate should be linked to a statepoint", InvokeBB);
4497 } else {
4498 // In all other cases relocate should be tied to the statepoint directly.
4499 // This covers relocates on a normal return path of invoke statepoint and
4500 // relocates of a call statepoint.
4501 auto Token = Call.getArgOperand(0);
4502 Assert(isa<Instruction>(Token) && isStatepoint(cast<Instruction>(Token)),
4503 "gc relocate is incorrectly tied to the statepoint", Call, Token);
4506 // Verify rest of the relocate arguments.
4507 const CallBase &StatepointCall =
4508 *cast<CallBase>(cast<GCRelocateInst>(Call).getStatepoint());
4510 // Both the base and derived must be piped through the safepoint.
4511 Value *Base = Call.getArgOperand(1);
4512 Assert(isa<ConstantInt>(Base),
4513 "gc.relocate operand #2 must be integer offset", Call);
4515 Value *Derived = Call.getArgOperand(2);
4516 Assert(isa<ConstantInt>(Derived),
4517 "gc.relocate operand #3 must be integer offset", Call);
4519 const int BaseIndex = cast<ConstantInt>(Base)->getZExtValue();
4520 const int DerivedIndex = cast<ConstantInt>(Derived)->getZExtValue();
4521 // Check the bounds
4522 Assert(0 <= BaseIndex && BaseIndex < (int)StatepointCall.arg_size(),
4523 "gc.relocate: statepoint base index out of bounds", Call);
4524 Assert(0 <= DerivedIndex && DerivedIndex < (int)StatepointCall.arg_size(),
4525 "gc.relocate: statepoint derived index out of bounds", Call);
4527 // Check that BaseIndex and DerivedIndex fall within the 'gc parameters'
4528     // section of the statepoint's arguments.
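    // For orientation only -- a hedged sketch of the operand layout this code
    // indexes into (argument widths omitted; not taken from this file):
    //   gc.statepoint(id, num_patch_bytes, target,
    //                 num_call_args, flags, <call args>...,
    //                 num_transition_args, <transition args>...,
    //                 num_deopt_args, <deopt args>...,
    //                 <gc pointer args>...)
    // so the 'gc parameters' section computed below begins after the deopt args.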
4529 Assert(StatepointCall.arg_size() > 0,
4530 "gc.statepoint: insufficient arguments");
4531 Assert(isa<ConstantInt>(StatepointCall.getArgOperand(3)),
4532 "gc.statement: number of call arguments must be constant integer");
4533 const unsigned NumCallArgs =
4534 cast<ConstantInt>(StatepointCall.getArgOperand(3))->getZExtValue();
4535 Assert(StatepointCall.arg_size() > NumCallArgs + 5,
4536 "gc.statepoint: mismatch in number of call arguments");
4537 Assert(isa<ConstantInt>(StatepointCall.getArgOperand(NumCallArgs + 5)),
4538 "gc.statepoint: number of transition arguments must be "
4539 "a constant integer");
4540 const int NumTransitionArgs =
4541 cast<ConstantInt>(StatepointCall.getArgOperand(NumCallArgs + 5))
4542 ->getZExtValue();
4543 const int DeoptArgsStart = 4 + NumCallArgs + 1 + NumTransitionArgs + 1;
4544 Assert(isa<ConstantInt>(StatepointCall.getArgOperand(DeoptArgsStart)),
4545 "gc.statepoint: number of deoptimization arguments must be "
4546 "a constant integer");
4547 const int NumDeoptArgs =
4548 cast<ConstantInt>(StatepointCall.getArgOperand(DeoptArgsStart))
4549 ->getZExtValue();
4550 const int GCParamArgsStart = DeoptArgsStart + 1 + NumDeoptArgs;
4551 const int GCParamArgsEnd = StatepointCall.arg_size();
4552 Assert(GCParamArgsStart <= BaseIndex && BaseIndex < GCParamArgsEnd,
4553 "gc.relocate: statepoint base index doesn't fall within the "
4554 "'gc parameters' section of the statepoint call",
4555 Call);
4556 Assert(GCParamArgsStart <= DerivedIndex && DerivedIndex < GCParamArgsEnd,
4557 "gc.relocate: statepoint derived index doesn't fall within the "
4558 "'gc parameters' section of the statepoint call",
4559 Call);
4561     // The relocated value must be a pointer type or a vector-of-pointers type,
4562     // but gc_relocate does not need to return the same pointer type as the
4563     // relocated pointer. It can be cast to the correct type later if that is
4564     // desired. However, the two must have the same address space and 'vectorness'.
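    // For example (an illustrative sketch): relocating a derived pointer of type
    // i8 addrspace(1)* may produce a gc.relocate of type i32 addrspace(1)*; the
    // pointee type may differ, but the address space and the scalar/vector shape
    // must match, which is exactly what the asserts below enforce.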
4565 GCRelocateInst &Relocate = cast<GCRelocateInst>(Call);
4566 Assert(Relocate.getDerivedPtr()->getType()->isPtrOrPtrVectorTy(),
4567 "gc.relocate: relocated value must be a gc pointer", Call);
4569 auto ResultType = Call.getType();
4570 auto DerivedType = Relocate.getDerivedPtr()->getType();
4571 Assert(ResultType->isVectorTy() == DerivedType->isVectorTy(),
4572 "gc.relocate: vector relocates to vector and pointer to pointer",
4573 Call);
4574 Assert(
4575 ResultType->getPointerAddressSpace() ==
4576 DerivedType->getPointerAddressSpace(),
4577 "gc.relocate: relocating a pointer shouldn't change its address space",
4578 Call);
4579 break;
4581 case Intrinsic::eh_exceptioncode:
4582 case Intrinsic::eh_exceptionpointer: {
4583 Assert(isa<CatchPadInst>(Call.getArgOperand(0)),
4584 "eh.exceptionpointer argument must be a catchpad", Call);
4585 break;
4587 case Intrinsic::masked_load: {
4588 Assert(Call.getType()->isVectorTy(), "masked_load: must return a vector",
4589 Call);
4591 Value *Ptr = Call.getArgOperand(0);
4592 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(1));
4593 Value *Mask = Call.getArgOperand(2);
4594 Value *PassThru = Call.getArgOperand(3);
4595 Assert(Mask->getType()->isVectorTy(), "masked_load: mask must be vector",
4596 Call);
4597 Assert(Alignment->getValue().isPowerOf2(),
4598 "masked_load: alignment must be a power of 2", Call);
4600 // DataTy is the overloaded type
4601 Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
4602 Assert(DataTy == Call.getType(),
4603 "masked_load: return must match pointer type", Call);
4604 Assert(PassThru->getType() == DataTy,
4605 "masked_load: pass through and data type must match", Call);
4606 Assert(Mask->getType()->getVectorNumElements() ==
4607 DataTy->getVectorNumElements(),
4608 "masked_load: vector mask must be same length as data", Call);
4609 break;
4611 case Intrinsic::masked_store: {
4612 Value *Val = Call.getArgOperand(0);
4613 Value *Ptr = Call.getArgOperand(1);
4614 ConstantInt *Alignment = cast<ConstantInt>(Call.getArgOperand(2));
4615 Value *Mask = Call.getArgOperand(3);
4616 Assert(Mask->getType()->isVectorTy(), "masked_store: mask must be vector",
4617 Call);
4618 Assert(Alignment->getValue().isPowerOf2(),
4619 "masked_store: alignment must be a power of 2", Call);
4621 // DataTy is the overloaded type
4622 Type *DataTy = cast<PointerType>(Ptr->getType())->getElementType();
4623 Assert(DataTy == Val->getType(),
4624 "masked_store: storee must match pointer type", Call);
4625 Assert(Mask->getType()->getVectorNumElements() ==
4626 DataTy->getVectorNumElements(),
4627 "masked_store: vector mask must be same length as data", Call);
4628 break;
4631 case Intrinsic::experimental_guard: {
4632 Assert(isa<CallInst>(Call), "experimental_guard cannot be invoked", Call);
4633 Assert(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
4634 "experimental_guard must have exactly one "
4635 "\"deopt\" operand bundle");
4636 break;
4639 case Intrinsic::experimental_deoptimize: {
4640 Assert(isa<CallInst>(Call), "experimental_deoptimize cannot be invoked",
4641 Call);
4642 Assert(Call.countOperandBundlesOfType(LLVMContext::OB_deopt) == 1,
4643 "experimental_deoptimize must have exactly one "
4644 "\"deopt\" operand bundle");
4645 Assert(Call.getType() == Call.getFunction()->getReturnType(),
4646 "experimental_deoptimize return type must match caller return type");
4648 if (isa<CallInst>(Call)) {
4649 auto *RI = dyn_cast<ReturnInst>(Call.getNextNode());
4650 Assert(RI,
4651 "calls to experimental_deoptimize must be followed by a return");
4653 if (!Call.getType()->isVoidTy() && RI)
4654 Assert(RI->getReturnValue() == &Call,
4655 "calls to experimental_deoptimize must be followed by a return "
4656 "of the value computed by experimental_deoptimize");
4659 break;
4661 case Intrinsic::sadd_sat:
4662 case Intrinsic::uadd_sat:
4663 case Intrinsic::ssub_sat:
4664 case Intrinsic::usub_sat: {
4665 Value *Op1 = Call.getArgOperand(0);
4666 Value *Op2 = Call.getArgOperand(1);
4667 Assert(Op1->getType()->isIntOrIntVectorTy(),
4668 "first operand of [us][add|sub]_sat must be an int type or vector "
4669 "of ints");
4670 Assert(Op2->getType()->isIntOrIntVectorTy(),
4671 "second operand of [us][add|sub]_sat must be an int type or vector "
4672 "of ints");
4673 break;
4675 case Intrinsic::smul_fix:
4676 case Intrinsic::smul_fix_sat:
4677 case Intrinsic::umul_fix:
4678 case Intrinsic::umul_fix_sat: {
4679 Value *Op1 = Call.getArgOperand(0);
4680 Value *Op2 = Call.getArgOperand(1);
4681 Assert(Op1->getType()->isIntOrIntVectorTy(),
4682 "first operand of [us]mul_fix[_sat] must be an int type or vector "
4683 "of ints");
4684 Assert(Op2->getType()->isIntOrIntVectorTy(),
4685 "second operand of [us]mul_fix_[sat] must be an int type or vector "
4686 "of ints");
4688 auto *Op3 = cast<ConstantInt>(Call.getArgOperand(2));
4689 Assert(Op3->getType()->getBitWidth() <= 32,
4690 "third argument of [us]mul_fix[_sat] must fit within 32 bits");
4692 if (ID == Intrinsic::smul_fix || ID == Intrinsic::smul_fix_sat) {
4693 Assert(
4694 Op3->getZExtValue() < Op1->getType()->getScalarSizeInBits(),
4695 "the scale of smul_fix[_sat] must be less than the width of the operands");
4696 } else {
4697 Assert(Op3->getZExtValue() <= Op1->getType()->getScalarSizeInBits(),
4698 "the scale of umul_fix[_sat] must be less than or equal to the width of "
4699 "the operands");
4701 break;
4703 case Intrinsic::lround:
4704 case Intrinsic::llround:
4705 case Intrinsic::lrint:
4706 case Intrinsic::llrint: {
4707 Type *ValTy = Call.getArgOperand(0)->getType();
4708 Type *ResultTy = Call.getType();
4709 Assert(!ValTy->isVectorTy() && !ResultTy->isVectorTy(),
4710 "Intrinsic does not support vectors", &Call);
4711 break;
4716 /// Carefully grab the subprogram from a local scope.
4718 /// This carefully grabs the subprogram from a local scope, avoiding the
4719 /// built-in assertions that would typically fire.
4720 static DISubprogram *getSubprogram(Metadata *LocalScope) {
4721 if (!LocalScope)
4722 return nullptr;
4724 if (auto *SP = dyn_cast<DISubprogram>(LocalScope))
4725 return SP;
4727 if (auto *LB = dyn_cast<DILexicalBlockBase>(LocalScope))
4728 return getSubprogram(LB->getRawScope());
4730 // Just return null; broken scope chains are checked elsewhere.
4731 assert(!isa<DILocalScope>(LocalScope) && "Unknown type of local scope");
4732 return nullptr;
4735 void Verifier::visitConstrainedFPIntrinsic(ConstrainedFPIntrinsic &FPI) {
4736 unsigned NumOperands = FPI.getNumArgOperands();
4737 bool HasExceptionMD = false;
4738 bool HasRoundingMD = false;
4739 switch (FPI.getIntrinsicID()) {
4740 case Intrinsic::experimental_constrained_sqrt:
4741 case Intrinsic::experimental_constrained_sin:
4742 case Intrinsic::experimental_constrained_cos:
4743 case Intrinsic::experimental_constrained_exp:
4744 case Intrinsic::experimental_constrained_exp2:
4745 case Intrinsic::experimental_constrained_log:
4746 case Intrinsic::experimental_constrained_log10:
4747 case Intrinsic::experimental_constrained_log2:
4748 case Intrinsic::experimental_constrained_rint:
4749 case Intrinsic::experimental_constrained_nearbyint:
4750 case Intrinsic::experimental_constrained_ceil:
4751 case Intrinsic::experimental_constrained_floor:
4752 case Intrinsic::experimental_constrained_round:
4753 case Intrinsic::experimental_constrained_trunc:
4754 Assert((NumOperands == 3), "invalid arguments for constrained FP intrinsic",
4755 &FPI);
4756 HasExceptionMD = true;
4757 HasRoundingMD = true;
4758 break;
4760 case Intrinsic::experimental_constrained_fma:
4761 Assert((NumOperands == 5), "invalid arguments for constrained FP intrinsic",
4762 &FPI);
4763 HasExceptionMD = true;
4764 HasRoundingMD = true;
4765 break;
4767 case Intrinsic::experimental_constrained_fadd:
4768 case Intrinsic::experimental_constrained_fsub:
4769 case Intrinsic::experimental_constrained_fmul:
4770 case Intrinsic::experimental_constrained_fdiv:
4771 case Intrinsic::experimental_constrained_frem:
4772 case Intrinsic::experimental_constrained_pow:
4773 case Intrinsic::experimental_constrained_powi:
4774 case Intrinsic::experimental_constrained_maxnum:
4775 case Intrinsic::experimental_constrained_minnum:
4776 Assert((NumOperands == 4), "invalid arguments for constrained FP intrinsic",
4777 &FPI);
4778 HasExceptionMD = true;
4779 HasRoundingMD = true;
4780 break;
4782 case Intrinsic::experimental_constrained_fptosi:
4783 case Intrinsic::experimental_constrained_fptoui: {
4784 Assert((NumOperands == 2),
4785 "invalid arguments for constrained FP intrinsic", &FPI);
4786 HasExceptionMD = true;
4788 Value *Operand = FPI.getArgOperand(0);
4789 uint64_t NumSrcElem = 0;
4790 Assert(Operand->getType()->isFPOrFPVectorTy(),
4791 "Intrinsic first argument must be floating point", &FPI);
4792 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
4793 NumSrcElem = OperandT->getNumElements();
4796 Operand = &FPI;
4797 Assert((NumSrcElem > 0) == Operand->getType()->isVectorTy(),
4798 "Intrinsic first argument and result disagree on vector use", &FPI);
4799 Assert(Operand->getType()->isIntOrIntVectorTy(),
4800 "Intrinsic result must be an integer", &FPI);
4801 if (auto *OperandT = dyn_cast<VectorType>(Operand->getType())) {
4802 Assert(NumSrcElem == OperandT->getNumElements(),
4803 "Intrinsic first argument and result vector lengths must be equal",
4804 &FPI);
4807 break;
4809 case Intrinsic::experimental_constrained_fptrunc:
4810 case Intrinsic::experimental_constrained_fpext: {
4811 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
4812 Assert((NumOperands == 3),
4813 "invalid arguments for constrained FP intrinsic", &FPI);
4814 HasRoundingMD = true;
4815 } else {
4816 Assert((NumOperands == 2),
4817 "invalid arguments for constrained FP intrinsic", &FPI);
4819 HasExceptionMD = true;
4821 Value *Operand = FPI.getArgOperand(0);
4822 Type *OperandTy = Operand->getType();
4823 Value *Result = &FPI;
4824 Type *ResultTy = Result->getType();
4825 Assert(OperandTy->isFPOrFPVectorTy(),
4826 "Intrinsic first argument must be FP or FP vector", &FPI);
4827 Assert(ResultTy->isFPOrFPVectorTy(),
4828 "Intrinsic result must be FP or FP vector", &FPI);
4829 Assert(OperandTy->isVectorTy() == ResultTy->isVectorTy(),
4830 "Intrinsic first argument and result disagree on vector use", &FPI);
4831 if (OperandTy->isVectorTy()) {
4832 auto *OperandVecTy = cast<VectorType>(OperandTy);
4833 auto *ResultVecTy = cast<VectorType>(ResultTy);
4834 Assert(OperandVecTy->getNumElements() == ResultVecTy->getNumElements(),
4835 "Intrinsic first argument and result vector lengths must be equal",
4836 &FPI);
4838 if (FPI.getIntrinsicID() == Intrinsic::experimental_constrained_fptrunc) {
4839 Assert(OperandTy->getScalarSizeInBits() > ResultTy->getScalarSizeInBits(),
4840 "Intrinsic first argument's type must be larger than result type",
4841 &FPI);
4842 } else {
4843 Assert(OperandTy->getScalarSizeInBits() < ResultTy->getScalarSizeInBits(),
4844 "Intrinsic first argument's type must be smaller than result type",
4845 &FPI);
4848 break;
4850 default:
4851 llvm_unreachable("Invalid constrained FP intrinsic!");
4854 // If a non-metadata argument is passed in a metadata slot then the
4855 // error will be caught earlier when the incorrect argument doesn't
4856 // match the specification in the intrinsic call table. Thus, no
4857 // argument type check is needed here.
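  // Illustrative call shape (a sketch, not taken from this file):
  //   call double @llvm.experimental.constrained.fadd.f64(
  //       double %a, double %b,
  //       metadata !"round.dynamic", metadata !"fpexcept.strict")
  // The trailing metadata operands are what the two checks below look for.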
4859 if (HasExceptionMD) {
4860 Assert(FPI.getExceptionBehavior().hasValue(),
4861 "invalid exception behavior argument", &FPI);
4863 if (HasRoundingMD) {
4864 Assert(FPI.getRoundingMode().hasValue(),
4865 "invalid rounding mode argument", &FPI);
4869 void Verifier::visitDbgIntrinsic(StringRef Kind, DbgVariableIntrinsic &DII) {
4870 auto *MD = cast<MetadataAsValue>(DII.getArgOperand(0))->getMetadata();
4871 AssertDI(isa<ValueAsMetadata>(MD) ||
4872 (isa<MDNode>(MD) && !cast<MDNode>(MD)->getNumOperands()),
4873 "invalid llvm.dbg." + Kind + " intrinsic address/value", &DII, MD);
4874 AssertDI(isa<DILocalVariable>(DII.getRawVariable()),
4875 "invalid llvm.dbg." + Kind + " intrinsic variable", &DII,
4876 DII.getRawVariable());
4877 AssertDI(isa<DIExpression>(DII.getRawExpression()),
4878 "invalid llvm.dbg." + Kind + " intrinsic expression", &DII,
4879 DII.getRawExpression());
4881 // Ignore broken !dbg attachments; they're checked elsewhere.
4882 if (MDNode *N = DII.getDebugLoc().getAsMDNode())
4883 if (!isa<DILocation>(N))
4884 return;
4886 BasicBlock *BB = DII.getParent();
4887 Function *F = BB ? BB->getParent() : nullptr;
4889 // The scopes for variables and !dbg attachments must agree.
4890 DILocalVariable *Var = DII.getVariable();
4891 DILocation *Loc = DII.getDebugLoc();
4892 AssertDI(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
4893 &DII, BB, F);
4895 DISubprogram *VarSP = getSubprogram(Var->getRawScope());
4896 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
4897 if (!VarSP || !LocSP)
4898 return; // Broken scope chains are checked elsewhere.
4900 AssertDI(VarSP == LocSP, "mismatched subprogram between llvm.dbg." + Kind +
4901 " variable and !dbg attachment",
4902 &DII, BB, F, Var, Var->getScope()->getSubprogram(), Loc,
4903 Loc->getScope()->getSubprogram());
4905 // This check is redundant with one in visitLocalVariable().
4906 AssertDI(isType(Var->getRawType()), "invalid type ref", Var,
4907 Var->getRawType());
4908 verifyFnArgs(DII);
4911 void Verifier::visitDbgLabelIntrinsic(StringRef Kind, DbgLabelInst &DLI) {
4912 AssertDI(isa<DILabel>(DLI.getRawLabel()),
4913 "invalid llvm.dbg." + Kind + " intrinsic variable", &DLI,
4914 DLI.getRawLabel());
4916 // Ignore broken !dbg attachments; they're checked elsewhere.
4917 if (MDNode *N = DLI.getDebugLoc().getAsMDNode())
4918 if (!isa<DILocation>(N))
4919 return;
4921 BasicBlock *BB = DLI.getParent();
4922 Function *F = BB ? BB->getParent() : nullptr;
4924 // The scopes for variables and !dbg attachments must agree.
4925 DILabel *Label = DLI.getLabel();
4926 DILocation *Loc = DLI.getDebugLoc();
4927 Assert(Loc, "llvm.dbg." + Kind + " intrinsic requires a !dbg attachment",
4928 &DLI, BB, F);
4930 DISubprogram *LabelSP = getSubprogram(Label->getRawScope());
4931 DISubprogram *LocSP = getSubprogram(Loc->getRawScope());
4932 if (!LabelSP || !LocSP)
4933 return;
4935 AssertDI(LabelSP == LocSP, "mismatched subprogram between llvm.dbg." + Kind +
4936 " label and !dbg attachment",
4937 &DLI, BB, F, Label, Label->getScope()->getSubprogram(), Loc,
4938 Loc->getScope()->getSubprogram());
4941 void Verifier::verifyFragmentExpression(const DbgVariableIntrinsic &I) {
4942 DILocalVariable *V = dyn_cast_or_null<DILocalVariable>(I.getRawVariable());
4943 DIExpression *E = dyn_cast_or_null<DIExpression>(I.getRawExpression());
4945 // We don't know whether this intrinsic verified correctly.
4946 if (!V || !E || !E->isValid())
4947 return;
4949 // Nothing to do if this isn't a DW_OP_LLVM_fragment expression.
4950 auto Fragment = E->getFragmentInfo();
4951 if (!Fragment)
4952 return;
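  // A fragment expression describes a bit slice of the variable, e.g. (a sketch):
  //   !DIExpression(DW_OP_LLVM_fragment, 16, 32)   ; bits [16, 48) of the variable
  // The offset/size bounds against the variable are checked in the overload below.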
4954 // The frontend helps out GDB by emitting the members of local anonymous
4955 // unions as artificial local variables with shared storage. When SROA splits
4956 // the storage for artificial local variables that are smaller than the entire
4957 // union, the overhang piece will be outside of the allotted space for the
4958 // variable and this check fails.
4959 // FIXME: Remove this check as soon as clang stops doing this; it hides bugs.
4960 if (V->isArtificial())
4961 return;
4963 verifyFragmentExpression(*V, *Fragment, &I);
4966 template <typename ValueOrMetadata>
4967 void Verifier::verifyFragmentExpression(const DIVariable &V,
4968 DIExpression::FragmentInfo Fragment,
4969 ValueOrMetadata *Desc) {
4970 // If there's no size, the type is broken, but that should be checked
4971 // elsewhere.
4972 auto VarSize = V.getSizeInBits();
4973 if (!VarSize)
4974 return;
4976 unsigned FragSize = Fragment.SizeInBits;
4977 unsigned FragOffset = Fragment.OffsetInBits;
4978 AssertDI(FragSize + FragOffset <= *VarSize,
4979 "fragment is larger than or outside of variable", Desc, &V);
4980 AssertDI(FragSize != *VarSize, "fragment covers entire variable", Desc, &V);
4983 void Verifier::verifyFnArgs(const DbgVariableIntrinsic &I) {
4984 // This function does not take the scope of noninlined function arguments into
4985   // account. Don't run it if the current function is nodebug, because it may
4986 // contain inlined debug intrinsics.
4987 if (!HasDebugInfo)
4988 return;
4990 // For performance reasons only check non-inlined ones.
4991 if (I.getDebugLoc()->getInlinedAt())
4992 return;
4994 DILocalVariable *Var = I.getVariable();
4995 AssertDI(Var, "dbg intrinsic without variable");
4997 unsigned ArgNo = Var->getArg();
4998 if (!ArgNo)
4999 return;
5001 // Verify there are no duplicate function argument debug info entries.
5002 // These will cause hard-to-debug assertions in the DWARF backend.
5003 if (DebugFnArgs.size() < ArgNo)
5004 DebugFnArgs.resize(ArgNo, nullptr);
5006 auto *Prev = DebugFnArgs[ArgNo - 1];
5007 DebugFnArgs[ArgNo - 1] = Var;
5008 AssertDI(!Prev || (Prev == Var), "conflicting debug info for argument", &I,
5009 Prev, Var);
5012 void Verifier::verifyCompileUnits() {
5013 // When more than one Module is imported into the same context, such as during
5014 // an LTO build before linking the modules, ODR type uniquing may cause types
5015 // to point to a different CU. This check does not make sense in this case.
5016 if (M.getContext().isODRUniquingDebugTypes())
5017 return;
5018 auto *CUs = M.getNamedMetadata("llvm.dbg.cu");
5019 SmallPtrSet<const Metadata *, 2> Listed;
5020 if (CUs)
5021 Listed.insert(CUs->op_begin(), CUs->op_end());
5022 for (auto *CU : CUVisited)
5023 AssertDI(Listed.count(CU), "DICompileUnit not listed in llvm.dbg.cu", CU);
5024 CUVisited.clear();
5027 void Verifier::verifyDeoptimizeCallingConvs() {
5028 if (DeoptimizeDeclarations.empty())
5029 return;
5031 const Function *First = DeoptimizeDeclarations[0];
5032 for (auto *F : makeArrayRef(DeoptimizeDeclarations).slice(1)) {
5033 Assert(First->getCallingConv() == F->getCallingConv(),
5034 "All llvm.experimental.deoptimize declarations must have the same "
5035 "calling convention",
5036 First, F);
5040 void Verifier::verifySourceDebugInfo(const DICompileUnit &U, const DIFile &F) {
5041 bool HasSource = F.getSource().hasValue();
5042 if (!HasSourceDebugInfo.count(&U))
5043 HasSourceDebugInfo[&U] = HasSource;
5044 AssertDI(HasSource == HasSourceDebugInfo[&U],
5045 "inconsistent use of embedded source");
5048 //===----------------------------------------------------------------------===//
5049 // Implement the public interfaces to this file...
5050 //===----------------------------------------------------------------------===//
5052 bool llvm::verifyFunction(const Function &f, raw_ostream *OS) {
5053 Function &F = const_cast<Function &>(f);
5055 // Don't use a raw_null_ostream. Printing IR is expensive.
5056 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/true, *f.getParent());
5058 // Note that this function's return value is inverted from what you would
5059 // expect of a function called "verify".
5060 return !V.verify(F);
5063 bool llvm::verifyModule(const Module &M, raw_ostream *OS,
5064 bool *BrokenDebugInfo) {
5065 // Don't use a raw_null_ostream. Printing IR is expensive.
5066 Verifier V(OS, /*ShouldTreatBrokenDebugInfoAsError=*/!BrokenDebugInfo, M);
5068 bool Broken = false;
5069 for (const Function &F : M)
5070 Broken |= !V.verify(F);
5072 Broken |= !V.verify();
5073 if (BrokenDebugInfo)
5074 *BrokenDebugInfo = V.hasBrokenDebugInfo();
5075 // Note that this function's return value is inverted from what you would
5076 // expect of a function called "verify".
5077 return Broken;
5080 namespace {
5082 struct VerifierLegacyPass : public FunctionPass {
5083 static char ID;
5085 std::unique_ptr<Verifier> V;
5086 bool FatalErrors = true;
5088 VerifierLegacyPass() : FunctionPass(ID) {
5089 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
5091 explicit VerifierLegacyPass(bool FatalErrors)
5092 : FunctionPass(ID),
5093 FatalErrors(FatalErrors) {
5094 initializeVerifierLegacyPassPass(*PassRegistry::getPassRegistry());
5097 bool doInitialization(Module &M) override {
5098 V = std::make_unique<Verifier>(
5099 &dbgs(), /*ShouldTreatBrokenDebugInfoAsError=*/false, M);
5100 return false;
5103 bool runOnFunction(Function &F) override {
5104 if (!V->verify(F) && FatalErrors) {
5105 errs() << "in function " << F.getName() << '\n';
5106 report_fatal_error("Broken function found, compilation aborted!");
5108 return false;
5111 bool doFinalization(Module &M) override {
5112 bool HasErrors = false;
5113 for (Function &F : M)
5114 if (F.isDeclaration())
5115 HasErrors |= !V->verify(F);
5117 HasErrors |= !V->verify();
5118 if (FatalErrors && (HasErrors || V->hasBrokenDebugInfo()))
5119 report_fatal_error("Broken module found, compilation aborted!");
5120 return false;
5123 void getAnalysisUsage(AnalysisUsage &AU) const override {
5124 AU.setPreservesAll();
5128 } // end anonymous namespace
5130 /// Helper to issue failure from the TBAA verification
5131 template <typename... Tys> void TBAAVerifier::CheckFailed(Tys &&... Args) {
5132 if (Diagnostic)
5133 return Diagnostic->CheckFailed(Args...);
5136 #define AssertTBAA(C, ...) \
5137 do { \
5138 if (!(C)) { \
5139 CheckFailed(__VA_ARGS__); \
5140 return false; \
5142 } while (false)
5144 /// Verify that \p BaseNode can be used as the "base type" in the struct-path
5145 /// TBAA scheme. This means \p BaseNode is either a scalar node, or a
5146 /// struct-type node describing an aggregate data structure (like a struct).
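///
/// For orientation, a sketch of the classic struct-path encoding (the node
/// contents here are illustrative assumptions, not taken from this file):
///   !0 = !{!"Simple C/C++ TBAA"}            ; root
///   !1 = !{!"int", !0, i64 0}               ; scalar node: name, parent, offset
///   !2 = !{!"S", !1, i64 0, !1, i64 4}      ; struct node: name, (field, offset)...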
5147 TBAAVerifier::TBAABaseNodeSummary
5148 TBAAVerifier::verifyTBAABaseNode(Instruction &I, const MDNode *BaseNode,
5149 bool IsNewFormat) {
5150 if (BaseNode->getNumOperands() < 2) {
5151 CheckFailed("Base nodes must have at least two operands", &I, BaseNode);
5152 return {true, ~0u};
5155 auto Itr = TBAABaseNodes.find(BaseNode);
5156 if (Itr != TBAABaseNodes.end())
5157 return Itr->second;
5159 auto Result = verifyTBAABaseNodeImpl(I, BaseNode, IsNewFormat);
5160 auto InsertResult = TBAABaseNodes.insert({BaseNode, Result});
5161 (void)InsertResult;
5162 assert(InsertResult.second && "We just checked!");
5163 return Result;
5166 TBAAVerifier::TBAABaseNodeSummary
5167 TBAAVerifier::verifyTBAABaseNodeImpl(Instruction &I, const MDNode *BaseNode,
5168 bool IsNewFormat) {
5169 const TBAAVerifier::TBAABaseNodeSummary InvalidNode = {true, ~0u};
5171 if (BaseNode->getNumOperands() == 2) {
5172 // Scalar nodes can only be accessed at offset 0.
5173 return isValidScalarTBAANode(BaseNode)
5174 ? TBAAVerifier::TBAABaseNodeSummary({false, 0})
5175 : InvalidNode;
5178 if (IsNewFormat) {
5179 if (BaseNode->getNumOperands() % 3 != 0) {
5180 CheckFailed("Access tag nodes must have the number of operands that is a "
5181 "multiple of 3!", BaseNode);
5182 return InvalidNode;
5184 } else {
5185 if (BaseNode->getNumOperands() % 2 != 1) {
5186 CheckFailed("Struct tag nodes must have an odd number of operands!",
5187 BaseNode);
5188 return InvalidNode;
5192 // Check the type size field.
5193 if (IsNewFormat) {
5194 auto *TypeSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
5195 BaseNode->getOperand(1));
5196 if (!TypeSizeNode) {
5197 CheckFailed("Type size nodes must be constants!", &I, BaseNode);
5198 return InvalidNode;
5202 // Check the type name field. In the new format it can be anything.
5203 if (!IsNewFormat && !isa<MDString>(BaseNode->getOperand(0))) {
5204 CheckFailed("Struct tag nodes have a string as their first operand",
5205 BaseNode);
5206 return InvalidNode;
5209 bool Failed = false;
5211 Optional<APInt> PrevOffset;
5212 unsigned BitWidth = ~0u;
5214 // We've already checked that BaseNode is not a degenerate root node with one
5215 // operand in \c verifyTBAABaseNode, so this loop should run at least once.
5216 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
5217 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
5218 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
5219 Idx += NumOpsPerField) {
5220 const MDOperand &FieldTy = BaseNode->getOperand(Idx);
5221 const MDOperand &FieldOffset = BaseNode->getOperand(Idx + 1);
5222 if (!isa<MDNode>(FieldTy)) {
5223 CheckFailed("Incorrect field entry in struct type node!", &I, BaseNode);
5224 Failed = true;
5225 continue;
5228 auto *OffsetEntryCI =
5229 mdconst::dyn_extract_or_null<ConstantInt>(FieldOffset);
5230 if (!OffsetEntryCI) {
5231 CheckFailed("Offset entries must be constants!", &I, BaseNode);
5232 Failed = true;
5233 continue;
5236 if (BitWidth == ~0u)
5237 BitWidth = OffsetEntryCI->getBitWidth();
5239 if (OffsetEntryCI->getBitWidth() != BitWidth) {
5240 CheckFailed(
5241 "Bitwidth between the offsets and struct type entries must match", &I,
5242 BaseNode);
5243 Failed = true;
5244 continue;
5247 // NB! As far as I can tell, we generate a non-strictly increasing offset
5248     // sequence only from structs that have zero-size bit fields. When
5249 // recursing into a contained struct in \c getFieldNodeFromTBAABaseNode we
5250 // pick the field lexically the latest in struct type metadata node. This
5251 // mirrors the actual behavior of the alias analysis implementation.
5252 bool IsAscending =
5253 !PrevOffset || PrevOffset->ule(OffsetEntryCI->getValue());
5255 if (!IsAscending) {
5256 CheckFailed("Offsets must be increasing!", &I, BaseNode);
5257 Failed = true;
5260 PrevOffset = OffsetEntryCI->getValue();
5262 if (IsNewFormat) {
5263 auto *MemberSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
5264 BaseNode->getOperand(Idx + 2));
5265 if (!MemberSizeNode) {
5266 CheckFailed("Member size entries must be constants!", &I, BaseNode);
5267 Failed = true;
5268 continue;
5273 return Failed ? InvalidNode
5274 : TBAAVerifier::TBAABaseNodeSummary(false, BitWidth);
5277 static bool IsRootTBAANode(const MDNode *MD) {
5278 return MD->getNumOperands() < 2;
5281 static bool IsScalarTBAANodeImpl(const MDNode *MD,
5282 SmallPtrSetImpl<const MDNode *> &Visited) {
5283 if (MD->getNumOperands() != 2 && MD->getNumOperands() != 3)
5284 return false;
5286 if (!isa<MDString>(MD->getOperand(0)))
5287 return false;
5289 if (MD->getNumOperands() == 3) {
5290 auto *Offset = mdconst::dyn_extract<ConstantInt>(MD->getOperand(2));
5291 if (!(Offset && Offset->isZero() && isa<MDString>(MD->getOperand(0))))
5292 return false;
5295 auto *Parent = dyn_cast_or_null<MDNode>(MD->getOperand(1));
5296 return Parent && Visited.insert(Parent).second &&
5297 (IsRootTBAANode(Parent) || IsScalarTBAANodeImpl(Parent, Visited));
5300 bool TBAAVerifier::isValidScalarTBAANode(const MDNode *MD) {
5301 auto ResultIt = TBAAScalarNodes.find(MD);
5302 if (ResultIt != TBAAScalarNodes.end())
5303 return ResultIt->second;
5305 SmallPtrSet<const MDNode *, 4> Visited;
5306 bool Result = IsScalarTBAANodeImpl(MD, Visited);
5307 auto InsertResult = TBAAScalarNodes.insert({MD, Result});
5308 (void)InsertResult;
5309 assert(InsertResult.second && "Just checked!");
5311 return Result;
5314 /// Returns the field node at the offset \p Offset in \p BaseNode. Update \p
5315 /// Offset in place to be the offset within the field node returned.
5317 /// We assume we've okayed \p BaseNode via \c verifyTBAABaseNode.
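///
/// For example (a sketch): with fields at offsets 0 and 8 and \p Offset == 12,
/// the field node at offset 8 is returned and \p Offset is updated to 4.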
5318 MDNode *TBAAVerifier::getFieldNodeFromTBAABaseNode(Instruction &I,
5319 const MDNode *BaseNode,
5320 APInt &Offset,
5321 bool IsNewFormat) {
5322 assert(BaseNode->getNumOperands() >= 2 && "Invalid base node!");
5324 // Scalar nodes have only one possible "field" -- their parent in the access
5325 // hierarchy. Offset must be zero at this point, but our caller is supposed
5326 // to Assert that.
5327 if (BaseNode->getNumOperands() == 2)
5328 return cast<MDNode>(BaseNode->getOperand(1));
5330 unsigned FirstFieldOpNo = IsNewFormat ? 3 : 1;
5331 unsigned NumOpsPerField = IsNewFormat ? 3 : 2;
5332 for (unsigned Idx = FirstFieldOpNo; Idx < BaseNode->getNumOperands();
5333 Idx += NumOpsPerField) {
5334 auto *OffsetEntryCI =
5335 mdconst::extract<ConstantInt>(BaseNode->getOperand(Idx + 1));
5336 if (OffsetEntryCI->getValue().ugt(Offset)) {
5337 if (Idx == FirstFieldOpNo) {
5338 CheckFailed("Could not find TBAA parent in struct type node", &I,
5339 BaseNode, &Offset);
5340 return nullptr;
5343 unsigned PrevIdx = Idx - NumOpsPerField;
5344 auto *PrevOffsetEntryCI =
5345 mdconst::extract<ConstantInt>(BaseNode->getOperand(PrevIdx + 1));
5346 Offset -= PrevOffsetEntryCI->getValue();
5347 return cast<MDNode>(BaseNode->getOperand(PrevIdx));
5351 unsigned LastIdx = BaseNode->getNumOperands() - NumOpsPerField;
5352 auto *LastOffsetEntryCI = mdconst::extract<ConstantInt>(
5353 BaseNode->getOperand(LastIdx + 1));
5354 Offset -= LastOffsetEntryCI->getValue();
5355 return cast<MDNode>(BaseNode->getOperand(LastIdx));
5358 static bool isNewFormatTBAATypeNode(llvm::MDNode *Type) {
5359 if (!Type || Type->getNumOperands() < 3)
5360 return false;
5362   // In the new format, type nodes must have a reference to the parent type as
5363   // their first operand.
5364 MDNode *Parent = dyn_cast_or_null<MDNode>(Type->getOperand(0));
5365 if (!Parent)
5366 return false;
5368 return true;
5371 bool TBAAVerifier::visitTBAAMetadata(Instruction &I, const MDNode *MD) {
5372 AssertTBAA(isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
5373 isa<VAArgInst>(I) || isa<AtomicRMWInst>(I) ||
5374 isa<AtomicCmpXchgInst>(I),
5375 "This instruction shall not have a TBAA access tag!", &I);
5377 bool IsStructPathTBAA =
5378 isa<MDNode>(MD->getOperand(0)) && MD->getNumOperands() >= 3;
5380 AssertTBAA(
5381 IsStructPathTBAA,
5382 "Old-style TBAA is no longer allowed, use struct-path TBAA instead", &I);
5384 MDNode *BaseNode = dyn_cast_or_null<MDNode>(MD->getOperand(0));
5385 MDNode *AccessType = dyn_cast_or_null<MDNode>(MD->getOperand(1));
5387 bool IsNewFormat = isNewFormatTBAATypeNode(AccessType);
5389 if (IsNewFormat) {
5390 AssertTBAA(MD->getNumOperands() == 4 || MD->getNumOperands() == 5,
5391 "Access tag metadata must have either 4 or 5 operands", &I, MD);
5392 } else {
5393 AssertTBAA(MD->getNumOperands() < 5,
5394 "Struct tag metadata must have either 3 or 4 operands", &I, MD);
5397 // Check the access size field.
5398 if (IsNewFormat) {
5399 auto *AccessSizeNode = mdconst::dyn_extract_or_null<ConstantInt>(
5400 MD->getOperand(3));
5401 AssertTBAA(AccessSizeNode, "Access size field must be a constant", &I, MD);
5404 // Check the immutability flag.
5405 unsigned ImmutabilityFlagOpNo = IsNewFormat ? 4 : 3;
5406 if (MD->getNumOperands() == ImmutabilityFlagOpNo + 1) {
5407 auto *IsImmutableCI = mdconst::dyn_extract_or_null<ConstantInt>(
5408 MD->getOperand(ImmutabilityFlagOpNo));
5409 AssertTBAA(IsImmutableCI,
5410 "Immutability tag on struct tag metadata must be a constant",
5411 &I, MD);
5412 AssertTBAA(
5413 IsImmutableCI->isZero() || IsImmutableCI->isOne(),
5414 "Immutability part of the struct tag metadata must be either 0 or 1",
5415 &I, MD);
5418 AssertTBAA(BaseNode && AccessType,
5419 "Malformed struct tag metadata: base and access-type "
5420 "should be non-null and point to Metadata nodes",
5421 &I, MD, BaseNode, AccessType);
5423 if (!IsNewFormat) {
5424 AssertTBAA(isValidScalarTBAANode(AccessType),
5425 "Access type node must be a valid scalar type", &I, MD,
5426 AccessType);
5429 auto *OffsetCI = mdconst::dyn_extract_or_null<ConstantInt>(MD->getOperand(2));
5430 AssertTBAA(OffsetCI, "Offset must be constant integer", &I, MD);
5432 APInt Offset = OffsetCI->getValue();
5433 bool SeenAccessTypeInPath = false;
5435 SmallPtrSet<MDNode *, 4> StructPath;
5437 for (/* empty */; BaseNode && !IsRootTBAANode(BaseNode);
5438 BaseNode = getFieldNodeFromTBAABaseNode(I, BaseNode, Offset,
5439 IsNewFormat)) {
5440 if (!StructPath.insert(BaseNode).second) {
5441 CheckFailed("Cycle detected in struct path", &I, MD);
5442 return false;
5445 bool Invalid;
5446 unsigned BaseNodeBitWidth;
5447 std::tie(Invalid, BaseNodeBitWidth) = verifyTBAABaseNode(I, BaseNode,
5448 IsNewFormat);
5450 // If the base node is invalid in itself, then we've already printed all the
5451 // errors we wanted to print.
5452 if (Invalid)
5453 return false;
5455 SeenAccessTypeInPath |= BaseNode == AccessType;
5457 if (isValidScalarTBAANode(BaseNode) || BaseNode == AccessType)
5458 AssertTBAA(Offset == 0, "Offset not zero at the point of scalar access",
5459 &I, MD, &Offset);
5461 AssertTBAA(BaseNodeBitWidth == Offset.getBitWidth() ||
5462 (BaseNodeBitWidth == 0 && Offset == 0) ||
5463 (IsNewFormat && BaseNodeBitWidth == ~0u),
5464 "Access bit-width not the same as description bit-width", &I, MD,
5465 BaseNodeBitWidth, Offset.getBitWidth());
5467 if (IsNewFormat && SeenAccessTypeInPath)
5468 break;
5471 AssertTBAA(SeenAccessTypeInPath, "Did not see access type in access path!",
5472 &I, MD);
5473 return true;
5476 char VerifierLegacyPass::ID = 0;
5477 INITIALIZE_PASS(VerifierLegacyPass, "verify", "Module Verifier", false, false)
5479 FunctionPass *llvm::createVerifierPass(bool FatalErrors) {
5480 return new VerifierLegacyPass(FatalErrors);
5483 AnalysisKey VerifierAnalysis::Key;
5484 VerifierAnalysis::Result VerifierAnalysis::run(Module &M,
5485 ModuleAnalysisManager &) {
5486 Result Res;
5487 Res.IRBroken = llvm::verifyModule(M, &dbgs(), &Res.DebugInfoBroken);
5488 return Res;
5491 VerifierAnalysis::Result VerifierAnalysis::run(Function &F,
5492 FunctionAnalysisManager &) {
5493 return { llvm::verifyFunction(F, &dbgs()), false };
5496 PreservedAnalyses VerifierPass::run(Module &M, ModuleAnalysisManager &AM) {
5497 auto Res = AM.getResult<VerifierAnalysis>(M);
5498 if (FatalErrors && (Res.IRBroken || Res.DebugInfoBroken))
5499 report_fatal_error("Broken module found, compilation aborted!");
5501 return PreservedAnalyses::all();
5504 PreservedAnalyses VerifierPass::run(Function &F, FunctionAnalysisManager &AM) {
5505 auto res = AM.getResult<VerifierAnalysis>(F);
5506 if (res.IRBroken && FatalErrors)
5507 report_fatal_error("Broken function found, compilation aborted!");
5509 return PreservedAnalyses::all();