[clang] Add test for CWG190 "Layout-compatible POD-struct types" (#121668)
[llvm-project.git] / llvm / lib / Analysis / Lint.cpp
blob4689451243cd96d84d752606ef21cd73b7a072b9
1 //===-- Lint.cpp - Check for common errors in LLVM IR ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass statically checks for common and easily-identified constructs
10 // which produce undefined or likely unintended behavior in LLVM IR.
12 // It is not a guarantee of correctness, in two ways. First, it isn't
13 // comprehensive. There are checks which could be done statically which are
14 // not yet implemented. Some of these are indicated by TODO comments, but
15 // those aren't comprehensive either. Second, many conditions cannot be
16 // checked statically. This pass does no dynamic instrumentation, so it
17 // can't check for all possible problems.
19 // Another limitation is that it assumes all code will be executed. A store
20 // through a null pointer in a basic block which is never reached is harmless,
21 // but this pass will warn about it anyway. This is the main reason why most
22 // of these checks live here instead of in the Verifier pass.
24 // Optimization passes may make conditions that this pass checks for more or
25 // less obvious. If an optimization pass appears to be introducing a warning,
26 // it may be that the optimization pass is merely exposing an existing
27 // condition in the code.
29 // This code may be run before instcombine. In many cases, instcombine checks
30 // for the same kinds of things and turns instructions with undefined behavior
31 // into unreachable (or equivalent). Because of this, this pass makes some
32 // effort to look through bitcasts and so on.
34 //===----------------------------------------------------------------------===//
36 #include "llvm/Analysis/Lint.h"
37 #include "llvm/ADT/APInt.h"
38 #include "llvm/ADT/ArrayRef.h"
39 #include "llvm/ADT/SmallPtrSet.h"
40 #include "llvm/ADT/Twine.h"
41 #include "llvm/Analysis/AliasAnalysis.h"
42 #include "llvm/Analysis/AssumptionCache.h"
43 #include "llvm/Analysis/BasicAliasAnalysis.h"
44 #include "llvm/Analysis/ConstantFolding.h"
45 #include "llvm/Analysis/InstructionSimplify.h"
46 #include "llvm/Analysis/Loads.h"
47 #include "llvm/Analysis/MemoryLocation.h"
48 #include "llvm/Analysis/ScopedNoAliasAA.h"
49 #include "llvm/Analysis/TargetLibraryInfo.h"
50 #include "llvm/Analysis/TypeBasedAliasAnalysis.h"
51 #include "llvm/Analysis/ValueTracking.h"
52 #include "llvm/IR/Argument.h"
53 #include "llvm/IR/BasicBlock.h"
54 #include "llvm/IR/Constant.h"
55 #include "llvm/IR/Constants.h"
56 #include "llvm/IR/DataLayout.h"
57 #include "llvm/IR/DerivedTypes.h"
58 #include "llvm/IR/Dominators.h"
59 #include "llvm/IR/Function.h"
60 #include "llvm/IR/GlobalVariable.h"
61 #include "llvm/IR/InstVisitor.h"
62 #include "llvm/IR/InstrTypes.h"
63 #include "llvm/IR/Instruction.h"
64 #include "llvm/IR/Instructions.h"
65 #include "llvm/IR/IntrinsicInst.h"
66 #include "llvm/IR/Module.h"
67 #include "llvm/IR/PassManager.h"
68 #include "llvm/IR/Type.h"
69 #include "llvm/IR/Value.h"
70 #include "llvm/Support/AMDGPUAddrSpace.h"
71 #include "llvm/Support/Casting.h"
72 #include "llvm/Support/KnownBits.h"
73 #include "llvm/Support/raw_ostream.h"
74 #include <cassert>
75 #include <cstdint>
76 #include <iterator>
77 #include <string>
79 using namespace llvm;
81 static const char LintAbortOnErrorArgName[] = "lint-abort-on-error";
82 static cl::opt<bool>
83 LintAbortOnError(LintAbortOnErrorArgName, cl::init(false),
84 cl::desc("In the Lint pass, abort on errors."));
86 namespace {
87 namespace MemRef {
88 static const unsigned Read = 1;
89 static const unsigned Write = 2;
90 static const unsigned Callee = 4;
91 static const unsigned Branchee = 8;
92 } // end namespace MemRef
94 class Lint : public InstVisitor<Lint> {
95 friend class InstVisitor<Lint>;
97 void visitFunction(Function &F);
99 void visitCallBase(CallBase &CB);
100 void visitMemoryReference(Instruction &I, const MemoryLocation &Loc,
101 MaybeAlign Alignment, Type *Ty, unsigned Flags);
103 void visitReturnInst(ReturnInst &I);
104 void visitLoadInst(LoadInst &I);
105 void visitStoreInst(StoreInst &I);
106 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
107 void visitAtomicRMWInst(AtomicRMWInst &I);
108 void visitXor(BinaryOperator &I);
109 void visitSub(BinaryOperator &I);
110 void visitLShr(BinaryOperator &I);
111 void visitAShr(BinaryOperator &I);
112 void visitShl(BinaryOperator &I);
113 void visitSDiv(BinaryOperator &I);
114 void visitUDiv(BinaryOperator &I);
115 void visitSRem(BinaryOperator &I);
116 void visitURem(BinaryOperator &I);
117 void visitAllocaInst(AllocaInst &I);
118 void visitVAArgInst(VAArgInst &I);
119 void visitIndirectBrInst(IndirectBrInst &I);
120 void visitExtractElementInst(ExtractElementInst &I);
121 void visitInsertElementInst(InsertElementInst &I);
122 void visitUnreachableInst(UnreachableInst &I);
124 Value *findValue(Value *V, bool OffsetOk) const;
125 Value *findValueImpl(Value *V, bool OffsetOk,
126 SmallPtrSetImpl<Value *> &Visited) const;
128 public:
129 Module *Mod;
130 Triple TT;
131 const DataLayout *DL;
132 AliasAnalysis *AA;
133 AssumptionCache *AC;
134 DominatorTree *DT;
135 TargetLibraryInfo *TLI;
137 std::string Messages;
138 raw_string_ostream MessagesStr;
140 Lint(Module *Mod, const DataLayout *DL, AliasAnalysis *AA,
141 AssumptionCache *AC, DominatorTree *DT, TargetLibraryInfo *TLI)
142 : Mod(Mod), TT(Triple::normalize(Mod->getTargetTriple())), DL(DL), AA(AA),
143 AC(AC), DT(DT), TLI(TLI), MessagesStr(Messages) {}
145 void WriteValues(ArrayRef<const Value *> Vs) {
146 for (const Value *V : Vs) {
147 if (!V)
148 continue;
149 if (isa<Instruction>(V)) {
150 MessagesStr << *V << '\n';
151 } else {
152 V->printAsOperand(MessagesStr, true, Mod);
153 MessagesStr << '\n';
158 /// A check failed, so printout out the condition and the message.
160 /// This provides a nice place to put a breakpoint if you want to see why
161 /// something is not correct.
162 void CheckFailed(const Twine &Message) { MessagesStr << Message << '\n'; }
164 /// A check failed (with values to print).
166 /// This calls the Message-only version so that the above is easier to set
167 /// a breakpoint on.
168 template <typename T1, typename... Ts>
169 void CheckFailed(const Twine &Message, const T1 &V1, const Ts &... Vs) {
170 CheckFailed(Message);
171 WriteValues({V1, Vs...});
174 } // end anonymous namespace
// Check - We know that cond should be true, if not print an error message.
#define Check(C, ...)                                                          \
  do {                                                                         \
    if (!(C)) {                                                                \
      CheckFailed(__VA_ARGS__);                                                \
      return;                                                                  \
    }                                                                          \
  } while (false)
185 void Lint::visitFunction(Function &F) {
186 // This isn't undefined behavior, it's just a little unusual, and it's a
187 // fairly common mistake to neglect to name a function.
188 Check(F.hasName() || F.hasLocalLinkage(),
189 "Unusual: Unnamed function with non-local linkage", &F);
191 // TODO: Check for irreducible control flow.
194 void Lint::visitCallBase(CallBase &I) {
195 Value *Callee = I.getCalledOperand();
197 visitMemoryReference(I, MemoryLocation::getAfter(Callee), std::nullopt,
198 nullptr, MemRef::Callee);
200 if (Function *F = dyn_cast<Function>(findValue(Callee,
201 /*OffsetOk=*/false))) {
202 Check(I.getCallingConv() == F->getCallingConv(),
203 "Undefined behavior: Caller and callee calling convention differ",
204 &I);
206 FunctionType *FT = F->getFunctionType();
207 unsigned NumActualArgs = I.arg_size();
209 Check(FT->isVarArg() ? FT->getNumParams() <= NumActualArgs
210 : FT->getNumParams() == NumActualArgs,
211 "Undefined behavior: Call argument count mismatches callee "
212 "argument count",
213 &I);
215 Check(FT->getReturnType() == I.getType(),
216 "Undefined behavior: Call return type mismatches "
217 "callee return type",
218 &I);
220 // Check argument types (in case the callee was casted) and attributes.
221 // TODO: Verify that caller and callee attributes are compatible.
222 Function::arg_iterator PI = F->arg_begin(), PE = F->arg_end();
223 auto AI = I.arg_begin(), AE = I.arg_end();
224 for (; AI != AE; ++AI) {
225 Value *Actual = *AI;
226 if (PI != PE) {
227 Argument *Formal = &*PI++;
228 Check(Formal->getType() == Actual->getType(),
229 "Undefined behavior: Call argument type mismatches "
230 "callee parameter type",
231 &I);
233 // Check that noalias arguments don't alias other arguments. This is
234 // not fully precise because we don't know the sizes of the dereferenced
235 // memory regions.
236 if (Formal->hasNoAliasAttr() && Actual->getType()->isPointerTy()) {
237 AttributeList PAL = I.getAttributes();
238 unsigned ArgNo = 0;
239 for (auto *BI = I.arg_begin(); BI != AE; ++BI, ++ArgNo) {
240 // Skip ByVal arguments since they will be memcpy'd to the callee's
241 // stack so we're not really passing the pointer anyway.
242 if (PAL.hasParamAttr(ArgNo, Attribute::ByVal))
243 continue;
244 // If both arguments are readonly, they have no dependence.
245 if (Formal->onlyReadsMemory() && I.onlyReadsMemory(ArgNo))
246 continue;
247 // Skip readnone arguments since those are guaranteed not to be
248 // dereferenced anyway.
249 if (I.doesNotAccessMemory(ArgNo))
250 continue;
251 if (AI != BI && (*BI)->getType()->isPointerTy() &&
252 !isa<ConstantPointerNull>(*BI)) {
253 AliasResult Result = AA->alias(*AI, *BI);
254 Check(Result != AliasResult::MustAlias &&
255 Result != AliasResult::PartialAlias,
256 "Unusual: noalias argument aliases another argument", &I);
261 // Check that an sret argument points to valid memory.
262 if (Formal->hasStructRetAttr() && Actual->getType()->isPointerTy()) {
263 Type *Ty = Formal->getParamStructRetType();
264 MemoryLocation Loc(
265 Actual, LocationSize::precise(DL->getTypeStoreSize(Ty)));
266 visitMemoryReference(I, Loc, DL->getABITypeAlign(Ty), Ty,
267 MemRef::Read | MemRef::Write);
273 if (const auto *CI = dyn_cast<CallInst>(&I)) {
274 if (CI->isTailCall()) {
275 const AttributeList &PAL = CI->getAttributes();
276 unsigned ArgNo = 0;
277 for (Value *Arg : I.args()) {
278 // Skip ByVal arguments since they will be memcpy'd to the callee's
279 // stack anyway.
280 if (PAL.hasParamAttr(ArgNo++, Attribute::ByVal))
281 continue;
282 Value *Obj = findValue(Arg, /*OffsetOk=*/true);
283 Check(!isa<AllocaInst>(Obj),
284 "Undefined behavior: Call with \"tail\" keyword references "
285 "alloca",
286 &I);
291 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I))
292 switch (II->getIntrinsicID()) {
293 default:
294 break;
296 // TODO: Check more intrinsics
298 case Intrinsic::memcpy:
299 case Intrinsic::memcpy_inline: {
300 MemCpyInst *MCI = cast<MemCpyInst>(&I);
301 visitMemoryReference(I, MemoryLocation::getForDest(MCI),
302 MCI->getDestAlign(), nullptr, MemRef::Write);
303 visitMemoryReference(I, MemoryLocation::getForSource(MCI),
304 MCI->getSourceAlign(), nullptr, MemRef::Read);
306 // Check that the memcpy arguments don't overlap. The AliasAnalysis API
307 // isn't expressive enough for what we really want to do. Known partial
308 // overlap is not distinguished from the case where nothing is known.
309 auto Size = LocationSize::afterPointer();
310 if (const ConstantInt *Len =
311 dyn_cast<ConstantInt>(findValue(MCI->getLength(),
312 /*OffsetOk=*/false)))
313 if (Len->getValue().isIntN(32))
314 Size = LocationSize::precise(Len->getValue().getZExtValue());
315 Check(AA->alias(MCI->getSource(), Size, MCI->getDest(), Size) !=
316 AliasResult::MustAlias,
317 "Undefined behavior: memcpy source and destination overlap", &I);
318 break;
320 case Intrinsic::memmove: {
321 MemMoveInst *MMI = cast<MemMoveInst>(&I);
322 visitMemoryReference(I, MemoryLocation::getForDest(MMI),
323 MMI->getDestAlign(), nullptr, MemRef::Write);
324 visitMemoryReference(I, MemoryLocation::getForSource(MMI),
325 MMI->getSourceAlign(), nullptr, MemRef::Read);
326 break;
328 case Intrinsic::memset: {
329 MemSetInst *MSI = cast<MemSetInst>(&I);
330 visitMemoryReference(I, MemoryLocation::getForDest(MSI),
331 MSI->getDestAlign(), nullptr, MemRef::Write);
332 break;
334 case Intrinsic::memset_inline: {
335 MemSetInlineInst *MSII = cast<MemSetInlineInst>(&I);
336 visitMemoryReference(I, MemoryLocation::getForDest(MSII),
337 MSII->getDestAlign(), nullptr, MemRef::Write);
338 break;
341 case Intrinsic::vastart:
342 // vastart in non-varargs function is rejected by the verifier
343 visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI),
344 std::nullopt, nullptr, MemRef::Read | MemRef::Write);
345 break;
346 case Intrinsic::vacopy:
347 visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI),
348 std::nullopt, nullptr, MemRef::Write);
349 visitMemoryReference(I, MemoryLocation::getForArgument(&I, 1, TLI),
350 std::nullopt, nullptr, MemRef::Read);
351 break;
352 case Intrinsic::vaend:
353 visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI),
354 std::nullopt, nullptr, MemRef::Read | MemRef::Write);
355 break;
357 case Intrinsic::stackrestore:
358 // Stackrestore doesn't read or write memory, but it sets the
359 // stack pointer, which the compiler may read from or write to
360 // at any time, so check it for both readability and writeability.
361 visitMemoryReference(I, MemoryLocation::getForArgument(&I, 0, TLI),
362 std::nullopt, nullptr, MemRef::Read | MemRef::Write);
363 break;
364 case Intrinsic::get_active_lane_mask:
365 if (auto *TripCount = dyn_cast<ConstantInt>(I.getArgOperand(1)))
366 Check(!TripCount->isZero(),
367 "get_active_lane_mask: operand #2 "
368 "must be greater than 0",
369 &I);
370 break;
374 void Lint::visitReturnInst(ReturnInst &I) {
375 Function *F = I.getParent()->getParent();
376 Check(!F->doesNotReturn(),
377 "Unusual: Return statement in function with noreturn attribute", &I);
379 if (Value *V = I.getReturnValue()) {
380 Value *Obj = findValue(V, /*OffsetOk=*/true);
381 Check(!isa<AllocaInst>(Obj), "Unusual: Returning alloca value", &I);
385 // TODO: Check that the reference is in bounds.
386 // TODO: Check readnone/readonly function attributes.
387 void Lint::visitMemoryReference(Instruction &I, const MemoryLocation &Loc,
388 MaybeAlign Align, Type *Ty, unsigned Flags) {
389 // If no memory is being referenced, it doesn't matter if the pointer
390 // is valid.
391 if (Loc.Size.isZero())
392 return;
394 Value *Ptr = const_cast<Value *>(Loc.Ptr);
395 Value *UnderlyingObject = findValue(Ptr, /*OffsetOk=*/true);
396 Check(!isa<ConstantPointerNull>(UnderlyingObject),
397 "Undefined behavior: Null pointer dereference", &I);
398 Check(!isa<UndefValue>(UnderlyingObject),
399 "Undefined behavior: Undef pointer dereference", &I);
400 Check(!isa<ConstantInt>(UnderlyingObject) ||
401 !cast<ConstantInt>(UnderlyingObject)->isMinusOne(),
402 "Unusual: All-ones pointer dereference", &I);
403 Check(!isa<ConstantInt>(UnderlyingObject) ||
404 !cast<ConstantInt>(UnderlyingObject)->isOne(),
405 "Unusual: Address one pointer dereference", &I);
407 if (Flags & MemRef::Write) {
408 if (TT.isAMDGPU())
409 Check(!AMDGPU::isConstantAddressSpace(
410 UnderlyingObject->getType()->getPointerAddressSpace()),
411 "Undefined behavior: Write to memory in const addrspace", &I);
413 if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(UnderlyingObject))
414 Check(!GV->isConstant(), "Undefined behavior: Write to read-only memory",
415 &I);
416 Check(!isa<Function>(UnderlyingObject) &&
417 !isa<BlockAddress>(UnderlyingObject),
418 "Undefined behavior: Write to text section", &I);
420 if (Flags & MemRef::Read) {
421 Check(!isa<Function>(UnderlyingObject), "Unusual: Load from function body",
422 &I);
423 Check(!isa<BlockAddress>(UnderlyingObject),
424 "Undefined behavior: Load from block address", &I);
426 if (Flags & MemRef::Callee) {
427 Check(!isa<BlockAddress>(UnderlyingObject),
428 "Undefined behavior: Call to block address", &I);
430 if (Flags & MemRef::Branchee) {
431 Check(!isa<Constant>(UnderlyingObject) ||
432 isa<BlockAddress>(UnderlyingObject),
433 "Undefined behavior: Branch to non-blockaddress", &I);
436 // Check for buffer overflows and misalignment.
437 // Only handles memory references that read/write something simple like an
438 // alloca instruction or a global variable.
439 int64_t Offset = 0;
440 if (Value *Base = GetPointerBaseWithConstantOffset(Ptr, Offset, *DL)) {
441 // OK, so the access is to a constant offset from Ptr. Check that Ptr is
442 // something we can handle and if so extract the size of this base object
443 // along with its alignment.
444 uint64_t BaseSize = MemoryLocation::UnknownSize;
445 MaybeAlign BaseAlign;
447 if (AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
448 Type *ATy = AI->getAllocatedType();
449 if (!AI->isArrayAllocation() && ATy->isSized() && !ATy->isScalableTy())
450 BaseSize = DL->getTypeAllocSize(ATy).getFixedValue();
451 BaseAlign = AI->getAlign();
452 } else if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
453 // If the global may be defined differently in another compilation unit
454 // then don't warn about funky memory accesses.
455 if (GV->hasDefinitiveInitializer()) {
456 Type *GTy = GV->getValueType();
457 if (GTy->isSized())
458 BaseSize = DL->getTypeAllocSize(GTy);
459 BaseAlign = GV->getAlign();
460 if (!BaseAlign && GTy->isSized())
461 BaseAlign = DL->getABITypeAlign(GTy);
465 // Accesses from before the start or after the end of the object are not
466 // defined.
467 Check(!Loc.Size.hasValue() || Loc.Size.isScalable() ||
468 BaseSize == MemoryLocation::UnknownSize ||
469 (Offset >= 0 && Offset + Loc.Size.getValue() <= BaseSize),
470 "Undefined behavior: Buffer overflow", &I);
472 // Accesses that say that the memory is more aligned than it is are not
473 // defined.
474 if (!Align && Ty && Ty->isSized())
475 Align = DL->getABITypeAlign(Ty);
476 if (BaseAlign && Align)
477 Check(*Align <= commonAlignment(*BaseAlign, Offset),
478 "Undefined behavior: Memory reference address is misaligned", &I);
482 void Lint::visitLoadInst(LoadInst &I) {
483 visitMemoryReference(I, MemoryLocation::get(&I), I.getAlign(), I.getType(),
484 MemRef::Read);
487 void Lint::visitStoreInst(StoreInst &I) {
488 visitMemoryReference(I, MemoryLocation::get(&I), I.getAlign(),
489 I.getOperand(0)->getType(), MemRef::Write);
492 void Lint::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
493 visitMemoryReference(I, MemoryLocation::get(&I), I.getAlign(),
494 I.getOperand(0)->getType(), MemRef::Write);
497 void Lint::visitAtomicRMWInst(AtomicRMWInst &I) {
498 visitMemoryReference(I, MemoryLocation::get(&I), I.getAlign(),
499 I.getOperand(0)->getType(), MemRef::Write);
502 void Lint::visitXor(BinaryOperator &I) {
503 Check(!isa<UndefValue>(I.getOperand(0)) || !isa<UndefValue>(I.getOperand(1)),
504 "Undefined result: xor(undef, undef)", &I);
507 void Lint::visitSub(BinaryOperator &I) {
508 Check(!isa<UndefValue>(I.getOperand(0)) || !isa<UndefValue>(I.getOperand(1)),
509 "Undefined result: sub(undef, undef)", &I);
512 void Lint::visitLShr(BinaryOperator &I) {
513 if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(I.getOperand(1),
514 /*OffsetOk=*/false)))
515 Check(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
516 "Undefined result: Shift count out of range", &I);
519 void Lint::visitAShr(BinaryOperator &I) {
520 if (ConstantInt *CI =
521 dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
522 Check(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
523 "Undefined result: Shift count out of range", &I);
526 void Lint::visitShl(BinaryOperator &I) {
527 if (ConstantInt *CI =
528 dyn_cast<ConstantInt>(findValue(I.getOperand(1), /*OffsetOk=*/false)))
529 Check(CI->getValue().ult(cast<IntegerType>(I.getType())->getBitWidth()),
530 "Undefined result: Shift count out of range", &I);
533 static bool isZero(Value *V, const DataLayout &DL, DominatorTree *DT,
534 AssumptionCache *AC) {
535 // Assume undef could be zero.
536 if (isa<UndefValue>(V))
537 return true;
539 VectorType *VecTy = dyn_cast<VectorType>(V->getType());
540 if (!VecTy) {
541 KnownBits Known =
542 computeKnownBits(V, DL, 0, AC, dyn_cast<Instruction>(V), DT);
543 return Known.isZero();
546 // Per-component check doesn't work with zeroinitializer
547 Constant *C = dyn_cast<Constant>(V);
548 if (!C)
549 return false;
551 if (C->isZeroValue())
552 return true;
554 // For a vector, KnownZero will only be true if all values are zero, so check
555 // this per component
556 for (unsigned I = 0, N = cast<FixedVectorType>(VecTy)->getNumElements();
557 I != N; ++I) {
558 Constant *Elem = C->getAggregateElement(I);
559 if (isa<UndefValue>(Elem))
560 return true;
562 KnownBits Known = computeKnownBits(Elem, DL);
563 if (Known.isZero())
564 return true;
567 return false;
570 void Lint::visitSDiv(BinaryOperator &I) {
571 Check(!isZero(I.getOperand(1), I.getDataLayout(), DT, AC),
572 "Undefined behavior: Division by zero", &I);
575 void Lint::visitUDiv(BinaryOperator &I) {
576 Check(!isZero(I.getOperand(1), I.getDataLayout(), DT, AC),
577 "Undefined behavior: Division by zero", &I);
580 void Lint::visitSRem(BinaryOperator &I) {
581 Check(!isZero(I.getOperand(1), I.getDataLayout(), DT, AC),
582 "Undefined behavior: Division by zero", &I);
585 void Lint::visitURem(BinaryOperator &I) {
586 Check(!isZero(I.getOperand(1), I.getDataLayout(), DT, AC),
587 "Undefined behavior: Division by zero", &I);
590 void Lint::visitAllocaInst(AllocaInst &I) {
591 if (isa<ConstantInt>(I.getArraySize()))
592 // This isn't undefined behavior, it's just an obvious pessimization.
593 Check(&I.getParent()->getParent()->getEntryBlock() == I.getParent(),
594 "Pessimization: Static alloca outside of entry block", &I);
596 // TODO: Check for an unusual size (MSB set?)
599 void Lint::visitVAArgInst(VAArgInst &I) {
600 visitMemoryReference(I, MemoryLocation::get(&I), std::nullopt, nullptr,
601 MemRef::Read | MemRef::Write);
604 void Lint::visitIndirectBrInst(IndirectBrInst &I) {
605 visitMemoryReference(I, MemoryLocation::getAfter(I.getAddress()),
606 std::nullopt, nullptr, MemRef::Branchee);
608 Check(I.getNumDestinations() != 0,
609 "Undefined behavior: indirectbr with no destinations", &I);
612 void Lint::visitExtractElementInst(ExtractElementInst &I) {
613 if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(I.getIndexOperand(),
614 /*OffsetOk=*/false))) {
615 ElementCount EC = I.getVectorOperandType()->getElementCount();
616 Check(EC.isScalable() || CI->getValue().ult(EC.getFixedValue()),
617 "Undefined result: extractelement index out of range", &I);
621 void Lint::visitInsertElementInst(InsertElementInst &I) {
622 if (ConstantInt *CI = dyn_cast<ConstantInt>(findValue(I.getOperand(2),
623 /*OffsetOk=*/false))) {
624 ElementCount EC = I.getType()->getElementCount();
625 Check(EC.isScalable() || CI->getValue().ult(EC.getFixedValue()),
626 "Undefined result: insertelement index out of range", &I);
630 void Lint::visitUnreachableInst(UnreachableInst &I) {
631 // This isn't undefined behavior, it's merely suspicious.
632 Check(&I == &I.getParent()->front() ||
633 std::prev(I.getIterator())->mayHaveSideEffects(),
634 "Unusual: unreachable immediately preceded by instruction without "
635 "side effects",
636 &I);
639 /// findValue - Look through bitcasts and simple memory reference patterns
640 /// to identify an equivalent, but more informative, value. If OffsetOk
641 /// is true, look through getelementptrs with non-zero offsets too.
643 /// Most analysis passes don't require this logic, because instcombine
644 /// will simplify most of these kinds of things away. But it's a goal of
645 /// this Lint pass to be useful even on non-optimized IR.
646 Value *Lint::findValue(Value *V, bool OffsetOk) const {
647 SmallPtrSet<Value *, 4> Visited;
648 return findValueImpl(V, OffsetOk, Visited);
651 /// findValueImpl - Implementation helper for findValue.
652 Value *Lint::findValueImpl(Value *V, bool OffsetOk,
653 SmallPtrSetImpl<Value *> &Visited) const {
654 // Detect self-referential values.
655 if (!Visited.insert(V).second)
656 return PoisonValue::get(V->getType());
658 // TODO: Look through sext or zext cast, when the result is known to
659 // be interpreted as signed or unsigned, respectively.
660 // TODO: Look through eliminable cast pairs.
661 // TODO: Look through calls with unique return values.
662 // TODO: Look through vector insert/extract/shuffle.
663 V = OffsetOk ? getUnderlyingObject(V) : V->stripPointerCasts();
664 if (LoadInst *L = dyn_cast<LoadInst>(V)) {
665 BasicBlock::iterator BBI = L->getIterator();
666 BasicBlock *BB = L->getParent();
667 SmallPtrSet<BasicBlock *, 4> VisitedBlocks;
668 BatchAAResults BatchAA(*AA);
669 for (;;) {
670 if (!VisitedBlocks.insert(BB).second)
671 break;
672 if (Value *U =
673 FindAvailableLoadedValue(L, BB, BBI, DefMaxInstsToScan, &BatchAA))
674 return findValueImpl(U, OffsetOk, Visited);
675 if (BBI != BB->begin())
676 break;
677 BB = BB->getUniquePredecessor();
678 if (!BB)
679 break;
680 BBI = BB->end();
682 } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
683 if (Value *W = PN->hasConstantValue())
684 return findValueImpl(W, OffsetOk, Visited);
685 } else if (CastInst *CI = dyn_cast<CastInst>(V)) {
686 if (CI->isNoopCast(*DL))
687 return findValueImpl(CI->getOperand(0), OffsetOk, Visited);
688 } else if (ExtractValueInst *Ex = dyn_cast<ExtractValueInst>(V)) {
689 if (Value *W =
690 FindInsertedValue(Ex->getAggregateOperand(), Ex->getIndices()))
691 if (W != V)
692 return findValueImpl(W, OffsetOk, Visited);
693 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
694 // Same as above, but for ConstantExpr instead of Instruction.
695 if (Instruction::isCast(CE->getOpcode())) {
696 if (CastInst::isNoopCast(Instruction::CastOps(CE->getOpcode()),
697 CE->getOperand(0)->getType(), CE->getType(),
698 *DL))
699 return findValueImpl(CE->getOperand(0), OffsetOk, Visited);
703 // As a last resort, try SimplifyInstruction or constant folding.
704 if (Instruction *Inst = dyn_cast<Instruction>(V)) {
705 if (Value *W = simplifyInstruction(Inst, {*DL, TLI, DT, AC}))
706 return findValueImpl(W, OffsetOk, Visited);
707 } else if (auto *C = dyn_cast<Constant>(V)) {
708 Value *W = ConstantFoldConstant(C, *DL, TLI);
709 if (W != V)
710 return findValueImpl(W, OffsetOk, Visited);
713 return V;
716 PreservedAnalyses LintPass::run(Function &F, FunctionAnalysisManager &AM) {
717 auto *Mod = F.getParent();
718 auto *DL = &F.getDataLayout();
719 auto *AA = &AM.getResult<AAManager>(F);
720 auto *AC = &AM.getResult<AssumptionAnalysis>(F);
721 auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
722 auto *TLI = &AM.getResult<TargetLibraryAnalysis>(F);
723 Lint L(Mod, DL, AA, AC, DT, TLI);
724 L.visit(F);
725 dbgs() << L.MessagesStr.str();
726 if (LintAbortOnError && !L.MessagesStr.str().empty())
727 report_fatal_error(Twine("Linter found errors, aborting. (enabled by --") +
728 LintAbortOnErrorArgName + ")",
729 false);
730 return PreservedAnalyses::all();
733 //===----------------------------------------------------------------------===//
734 // Implement the public interfaces to this file...
735 //===----------------------------------------------------------------------===//
737 /// lintFunction - Check a function for errors, printing messages on stderr.
739 void llvm::lintFunction(const Function &f) {
740 Function &F = const_cast<Function &>(f);
741 assert(!F.isDeclaration() && "Cannot lint external functions");
743 FunctionAnalysisManager FAM;
744 FAM.registerPass([&] { return TargetLibraryAnalysis(); });
745 FAM.registerPass([&] { return DominatorTreeAnalysis(); });
746 FAM.registerPass([&] { return AssumptionAnalysis(); });
747 FAM.registerPass([&] {
748 AAManager AA;
749 AA.registerFunctionAnalysis<BasicAA>();
750 AA.registerFunctionAnalysis<ScopedNoAliasAA>();
751 AA.registerFunctionAnalysis<TypeBasedAA>();
752 return AA;
754 LintPass().run(F, FAM);
757 /// lintModule - Check a module for errors, printing messages on stderr.
759 void llvm::lintModule(const Module &M) {
760 for (const Function &F : M) {
761 if (!F.isDeclaration())
762 lintFunction(F);