1 //=- AnalysisBasedWarnings.cpp - Sema warnings based on libAnalysis -*- C++ -*-=//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file defines analysis_warnings::[Policy,Executor].
10 // Together they are used by Sema to issue warnings based on inexpensive
11 // static analysis algorithms in libAnalysis.
13 //===----------------------------------------------------------------------===//
15 #include "clang/Sema/AnalysisBasedWarnings.h"
16 #include "clang/AST/DeclCXX.h"
17 #include "clang/AST/DeclObjC.h"
18 #include "clang/AST/EvaluatedExprVisitor.h"
19 #include "clang/AST/ExprCXX.h"
20 #include "clang/AST/ExprObjC.h"
21 #include "clang/AST/ParentMap.h"
22 #include "clang/AST/RecursiveASTVisitor.h"
23 #include "clang/AST/StmtCXX.h"
24 #include "clang/AST/StmtObjC.h"
25 #include "clang/AST/StmtVisitor.h"
26 #include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
27 #include "clang/Analysis/Analyses/CalledOnceCheck.h"
28 #include "clang/Analysis/Analyses/Consumed.h"
29 #include "clang/Analysis/Analyses/ReachableCode.h"
30 #include "clang/Analysis/Analyses/ThreadSafety.h"
31 #include "clang/Analysis/Analyses/UninitializedValues.h"
32 #include "clang/Analysis/AnalysisDeclContext.h"
33 #include "clang/Analysis/CFG.h"
34 #include "clang/Analysis/CFGStmtMap.h"
35 #include "clang/Basic/SourceLocation.h"
36 #include "clang/Basic/SourceManager.h"
37 #include "clang/Lex/Preprocessor.h"
38 #include "clang/Sema/ScopeInfo.h"
39 #include "clang/Sema/SemaInternal.h"
40 #include "llvm/ADT/ArrayRef.h"
41 #include "llvm/ADT/BitVector.h"
42 #include "llvm/ADT/MapVector.h"
43 #include "llvm/ADT/SmallString.h"
44 #include "llvm/ADT/SmallVector.h"
45 #include "llvm/ADT/StringRef.h"
46 #include "llvm/Support/Casting.h"
51 using namespace clang
;
53 //===----------------------------------------------------------------------===//
54 // Unreachable code analysis.
55 //===----------------------------------------------------------------------===//
58 class UnreachableCodeHandler
: public reachable_code::Callback
{
60 SourceRange PreviousSilenceableCondVal
;
63 UnreachableCodeHandler(Sema
&s
) : S(s
) {}
65 void HandleUnreachable(reachable_code::UnreachableKind UK
,
67 SourceRange SilenceableCondVal
,
69 SourceRange R2
) override
{
70 // Avoid reporting multiple unreachable code diagnostics that are
71 // triggered by the same conditional value.
72 if (PreviousSilenceableCondVal
.isValid() &&
73 SilenceableCondVal
.isValid() &&
74 PreviousSilenceableCondVal
== SilenceableCondVal
)
76 PreviousSilenceableCondVal
= SilenceableCondVal
;
78 unsigned diag
= diag::warn_unreachable
;
80 case reachable_code::UK_Break
:
81 diag
= diag::warn_unreachable_break
;
83 case reachable_code::UK_Return
:
84 diag
= diag::warn_unreachable_return
;
86 case reachable_code::UK_Loop_Increment
:
87 diag
= diag::warn_unreachable_loop_increment
;
89 case reachable_code::UK_Other
:
93 S
.Diag(L
, diag
) << R1
<< R2
;
95 SourceLocation Open
= SilenceableCondVal
.getBegin();
97 SourceLocation Close
= SilenceableCondVal
.getEnd();
98 Close
= S
.getLocForEndOfToken(Close
);
99 if (Close
.isValid()) {
100 S
.Diag(Open
, diag::note_unreachable_silence
)
101 << FixItHint::CreateInsertion(Open
, "/* DISABLES CODE */ (")
102 << FixItHint::CreateInsertion(Close
, ")");
107 } // anonymous namespace
109 /// CheckUnreachable - Check for unreachable code.
110 static void CheckUnreachable(Sema
&S
, AnalysisDeclContext
&AC
) {
111 // As a heuristic prune all diagnostics not in the main file. Currently
112 // the majority of warnings in headers are false positives. These
113 // are largely caused by configuration state, e.g. preprocessor
114 // defined code, etc.
116 // Note that this is also a performance optimization. Analyzing
117 // headers many times can be expensive.
118 if (!S
.getSourceManager().isInMainFile(AC
.getDecl()->getBeginLoc()))
121 UnreachableCodeHandler
UC(S
);
122 reachable_code::FindUnreachableCode(AC
, S
.getPreprocessor(), UC
);
126 /// Warn on logical operator errors in CFGBuilder
127 class LogicalErrorHandler
: public CFGCallback
{
131 LogicalErrorHandler(Sema
&S
) : S(S
) {}
133 static bool HasMacroID(const Expr
*E
) {
134 if (E
->getExprLoc().isMacroID())
137 // Recurse to children.
138 for (const Stmt
*SubStmt
: E
->children())
139 if (const Expr
*SubExpr
= dyn_cast_or_null
<Expr
>(SubStmt
))
140 if (HasMacroID(SubExpr
))
146 void compareAlwaysTrue(const BinaryOperator
*B
, bool isAlwaysTrue
) override
{
150 SourceRange DiagRange
= B
->getSourceRange();
151 S
.Diag(B
->getExprLoc(), diag::warn_tautological_overlap_comparison
)
152 << DiagRange
<< isAlwaysTrue
;
155 void compareBitwiseEquality(const BinaryOperator
*B
,
156 bool isAlwaysTrue
) override
{
160 SourceRange DiagRange
= B
->getSourceRange();
161 S
.Diag(B
->getExprLoc(), diag::warn_comparison_bitwise_always
)
162 << DiagRange
<< isAlwaysTrue
;
165 void compareBitwiseOr(const BinaryOperator
*B
) override
{
169 SourceRange DiagRange
= B
->getSourceRange();
170 S
.Diag(B
->getExprLoc(), diag::warn_comparison_bitwise_or
) << DiagRange
;
173 static bool hasActiveDiagnostics(DiagnosticsEngine
&Diags
,
174 SourceLocation Loc
) {
175 return !Diags
.isIgnored(diag::warn_tautological_overlap_comparison
, Loc
) ||
176 !Diags
.isIgnored(diag::warn_comparison_bitwise_or
, Loc
);
179 } // anonymous namespace
181 //===----------------------------------------------------------------------===//
182 // Check for infinite self-recursion in functions
183 //===----------------------------------------------------------------------===//
185 // Returns true if the function is called anywhere within the CFGBlock.
186 // For member functions, the additional condition of being call from the
187 // this pointer is required.
188 static bool hasRecursiveCallInPath(const FunctionDecl
*FD
, CFGBlock
&Block
) {
189 // Process all the Stmt's in this block to find any calls to FD.
190 for (const auto &B
: Block
) {
191 if (B
.getKind() != CFGElement::Statement
)
194 const CallExpr
*CE
= dyn_cast
<CallExpr
>(B
.getAs
<CFGStmt
>()->getStmt());
195 if (!CE
|| !CE
->getCalleeDecl() ||
196 CE
->getCalleeDecl()->getCanonicalDecl() != FD
)
199 // Skip function calls which are qualified with a templated class.
200 if (const DeclRefExpr
*DRE
=
201 dyn_cast
<DeclRefExpr
>(CE
->getCallee()->IgnoreParenImpCasts())) {
202 if (NestedNameSpecifier
*NNS
= DRE
->getQualifier()) {
203 if (NNS
->getKind() == NestedNameSpecifier::TypeSpec
&&
204 isa
<TemplateSpecializationType
>(NNS
->getAsType())) {
210 const CXXMemberCallExpr
*MCE
= dyn_cast
<CXXMemberCallExpr
>(CE
);
211 if (!MCE
|| isa
<CXXThisExpr
>(MCE
->getImplicitObjectArgument()) ||
212 !MCE
->getMethodDecl()->isVirtual())
218 // Returns true if every path from the entry block passes through a call to FD.
219 static bool checkForRecursiveFunctionCall(const FunctionDecl
*FD
, CFG
*cfg
) {
220 llvm::SmallPtrSet
<CFGBlock
*, 16> Visited
;
221 llvm::SmallVector
<CFGBlock
*, 16> WorkList
;
222 // Keep track of whether we found at least one recursive path.
223 bool foundRecursion
= false;
225 const unsigned ExitID
= cfg
->getExit().getBlockID();
227 // Seed the work list with the entry block.
228 WorkList
.push_back(&cfg
->getEntry());
230 while (!WorkList
.empty()) {
231 CFGBlock
*Block
= WorkList
.pop_back_val();
233 for (auto I
= Block
->succ_begin(), E
= Block
->succ_end(); I
!= E
; ++I
) {
234 if (CFGBlock
*SuccBlock
= *I
) {
235 if (!Visited
.insert(SuccBlock
).second
)
238 // Found a path to the exit node without a recursive call.
239 if (ExitID
== SuccBlock
->getBlockID())
242 // If the successor block contains a recursive call, end analysis there.
243 if (hasRecursiveCallInPath(FD
, *SuccBlock
)) {
244 foundRecursion
= true;
248 WorkList
.push_back(SuccBlock
);
252 return foundRecursion
;
255 static void checkRecursiveFunction(Sema
&S
, const FunctionDecl
*FD
,
256 const Stmt
*Body
, AnalysisDeclContext
&AC
) {
257 FD
= FD
->getCanonicalDecl();
259 // Only run on non-templated functions and non-templated members of
260 // templated classes.
261 if (FD
->getTemplatedKind() != FunctionDecl::TK_NonTemplate
&&
262 FD
->getTemplatedKind() != FunctionDecl::TK_MemberSpecialization
)
265 CFG
*cfg
= AC
.getCFG();
268 // If the exit block is unreachable, skip processing the function.
269 if (cfg
->getExit().pred_empty())
272 // Emit diagnostic if a recursive function call is detected for all paths.
273 if (checkForRecursiveFunctionCall(FD
, cfg
))
274 S
.Diag(Body
->getBeginLoc(), diag::warn_infinite_recursive_function
);
277 //===----------------------------------------------------------------------===//
278 // Check for throw in a non-throwing function.
279 //===----------------------------------------------------------------------===//
281 /// Determine whether an exception thrown by E, unwinding from ThrowBlock,
282 /// can reach ExitBlock.
283 static bool throwEscapes(Sema
&S
, const CXXThrowExpr
*E
, CFGBlock
&ThrowBlock
,
285 SmallVector
<CFGBlock
*, 16> Stack
;
286 llvm::BitVector
Queued(Body
->getNumBlockIDs());
288 Stack
.push_back(&ThrowBlock
);
289 Queued
[ThrowBlock
.getBlockID()] = true;
291 while (!Stack
.empty()) {
292 CFGBlock
&UnwindBlock
= *Stack
.back();
295 for (auto &Succ
: UnwindBlock
.succs()) {
296 if (!Succ
.isReachable() || Queued
[Succ
->getBlockID()])
299 if (Succ
->getBlockID() == Body
->getExit().getBlockID())
303 dyn_cast_or_null
<CXXCatchStmt
>(Succ
->getLabel())) {
304 QualType Caught
= Catch
->getCaughtType();
305 if (Caught
.isNull() || // catch (...) catches everything
306 !E
->getSubExpr() || // throw; is considered cuaght by any handler
307 S
.handlerCanCatch(Caught
, E
->getSubExpr()->getType()))
308 // Exception doesn't escape via this path.
311 Stack
.push_back(Succ
);
312 Queued
[Succ
->getBlockID()] = true;
320 static void visitReachableThrows(
322 llvm::function_ref
<void(const CXXThrowExpr
*, CFGBlock
&)> Visit
) {
323 llvm::BitVector
Reachable(BodyCFG
->getNumBlockIDs());
324 clang::reachable_code::ScanReachableFromBlock(&BodyCFG
->getEntry(), Reachable
);
325 for (CFGBlock
*B
: *BodyCFG
) {
326 if (!Reachable
[B
->getBlockID()])
328 for (CFGElement
&E
: *B
) {
329 Optional
<CFGStmt
> S
= E
.getAs
<CFGStmt
>();
332 if (auto *Throw
= dyn_cast
<CXXThrowExpr
>(S
->getStmt()))
338 static void EmitDiagForCXXThrowInNonThrowingFunc(Sema
&S
, SourceLocation OpLoc
,
339 const FunctionDecl
*FD
) {
340 if (!S
.getSourceManager().isInSystemHeader(OpLoc
) &&
341 FD
->getTypeSourceInfo()) {
342 S
.Diag(OpLoc
, diag::warn_throw_in_noexcept_func
) << FD
;
343 if (S
.getLangOpts().CPlusPlus11
&&
344 (isa
<CXXDestructorDecl
>(FD
) ||
345 FD
->getDeclName().getCXXOverloadedOperator() == OO_Delete
||
346 FD
->getDeclName().getCXXOverloadedOperator() == OO_Array_Delete
)) {
347 if (const auto *Ty
= FD
->getTypeSourceInfo()->getType()->
348 getAs
<FunctionProtoType
>())
349 S
.Diag(FD
->getLocation(), diag::note_throw_in_dtor
)
350 << !isa
<CXXDestructorDecl
>(FD
) << !Ty
->hasExceptionSpec()
351 << FD
->getExceptionSpecSourceRange();
353 S
.Diag(FD
->getLocation(), diag::note_throw_in_function
)
354 << FD
->getExceptionSpecSourceRange();
358 static void checkThrowInNonThrowingFunc(Sema
&S
, const FunctionDecl
*FD
,
359 AnalysisDeclContext
&AC
) {
360 CFG
*BodyCFG
= AC
.getCFG();
363 if (BodyCFG
->getExit().pred_empty())
365 visitReachableThrows(BodyCFG
, [&](const CXXThrowExpr
*Throw
, CFGBlock
&Block
) {
366 if (throwEscapes(S
, Throw
, Block
, BodyCFG
))
367 EmitDiagForCXXThrowInNonThrowingFunc(S
, Throw
->getThrowLoc(), FD
);
371 static bool isNoexcept(const FunctionDecl
*FD
) {
372 const auto *FPT
= FD
->getType()->castAs
<FunctionProtoType
>();
373 if (FPT
->isNothrow() || FD
->hasAttr
<NoThrowAttr
>())
378 //===----------------------------------------------------------------------===//
379 // Check for missing return value.
380 //===----------------------------------------------------------------------===//
382 enum ControlFlowKind
{
387 NeverFallThroughOrReturn
390 /// CheckFallThrough - Check that we don't fall off the end of a
391 /// Statement that should return a value.
393 /// \returns AlwaysFallThrough iff we always fall off the end of the statement,
394 /// MaybeFallThrough iff we might or might not fall off the end,
395 /// NeverFallThroughOrReturn iff we never fall off the end of the statement or
396 /// return. We assume NeverFallThrough iff we never fall off the end of the
397 /// statement but we may return. We assume that functions not marked noreturn
399 static ControlFlowKind
CheckFallThrough(AnalysisDeclContext
&AC
) {
400 CFG
*cfg
= AC
.getCFG();
401 if (!cfg
) return UnknownFallThrough
;
403 // The CFG leaves in dead things, and we don't want the dead code paths to
404 // confuse us, so we mark all live things first.
405 llvm::BitVector
live(cfg
->getNumBlockIDs());
406 unsigned count
= reachable_code::ScanReachableFromBlock(&cfg
->getEntry(),
409 bool AddEHEdges
= AC
.getAddEHEdges();
410 if (!AddEHEdges
&& count
!= cfg
->getNumBlockIDs())
411 // When there are things remaining dead, and we didn't add EH edges
412 // from CallExprs to the catch clauses, we have to go back and
413 // mark them as live.
414 for (const auto *B
: *cfg
) {
415 if (!live
[B
->getBlockID()]) {
416 if (B
->pred_begin() == B
->pred_end()) {
417 const Stmt
*Term
= B
->getTerminatorStmt();
418 if (Term
&& isa
<CXXTryStmt
>(Term
))
419 // When not adding EH edges from calls, catch clauses
420 // can otherwise seem dead. Avoid noting them as dead.
421 count
+= reachable_code::ScanReachableFromBlock(B
, live
);
427 // Now we know what is live, we check the live precessors of the exit block
428 // and look for fall through paths, being careful to ignore normal returns,
429 // and exceptional paths.
430 bool HasLiveReturn
= false;
431 bool HasFakeEdge
= false;
432 bool HasPlainEdge
= false;
433 bool HasAbnormalEdge
= false;
435 // Ignore default cases that aren't likely to be reachable because all
436 // enums in a switch(X) have explicit case statements.
437 CFGBlock::FilterOptions FO
;
438 FO
.IgnoreDefaultsWithCoveredEnums
= 1;
440 for (CFGBlock::filtered_pred_iterator I
=
441 cfg
->getExit().filtered_pred_start_end(FO
);
443 const CFGBlock
&B
= **I
;
444 if (!live
[B
.getBlockID()])
447 // Skip blocks which contain an element marked as no-return. They don't
448 // represent actually viable edges into the exit block, so mark them as
450 if (B
.hasNoReturnElement()) {
451 HasAbnormalEdge
= true;
455 // Destructors can appear after the 'return' in the CFG. This is
456 // normal. We need to look pass the destructors for the return
457 // statement (if it exists).
458 CFGBlock::const_reverse_iterator ri
= B
.rbegin(), re
= B
.rend();
460 for ( ; ri
!= re
; ++ri
)
461 if (ri
->getAs
<CFGStmt
>())
464 // No more CFGElements in the block?
466 const Stmt
*Term
= B
.getTerminatorStmt();
467 if (Term
&& (isa
<CXXTryStmt
>(Term
) || isa
<ObjCAtTryStmt
>(Term
))) {
468 HasAbnormalEdge
= true;
471 // A labeled empty statement, or the entry block...
476 CFGStmt CS
= ri
->castAs
<CFGStmt
>();
477 const Stmt
*S
= CS
.getStmt();
478 if (isa
<ReturnStmt
>(S
) || isa
<CoreturnStmt
>(S
)) {
479 HasLiveReturn
= true;
482 if (isa
<ObjCAtThrowStmt
>(S
)) {
486 if (isa
<CXXThrowExpr
>(S
)) {
490 if (isa
<MSAsmStmt
>(S
)) {
491 // TODO: Verify this is correct.
493 HasLiveReturn
= true;
496 if (isa
<CXXTryStmt
>(S
)) {
497 HasAbnormalEdge
= true;
500 if (!llvm::is_contained(B
.succs(), &cfg
->getExit())) {
501 HasAbnormalEdge
= true;
509 return NeverFallThrough
;
510 return NeverFallThroughOrReturn
;
512 if (HasAbnormalEdge
|| HasFakeEdge
|| HasLiveReturn
)
513 return MaybeFallThrough
;
514 // This says AlwaysFallThrough for calls to functions that are not marked
515 // noreturn, that don't return. If people would like this warning to be more
516 // accurate, such functions should be marked as noreturn.
517 return AlwaysFallThrough
;
522 struct CheckFallThroughDiagnostics
{
523 unsigned diag_MaybeFallThrough_HasNoReturn
;
524 unsigned diag_MaybeFallThrough_ReturnsNonVoid
;
525 unsigned diag_AlwaysFallThrough_HasNoReturn
;
526 unsigned diag_AlwaysFallThrough_ReturnsNonVoid
;
527 unsigned diag_NeverFallThroughOrReturn
;
528 enum { Function
, Block
, Lambda
, Coroutine
} funMode
;
529 SourceLocation FuncLoc
;
531 static CheckFallThroughDiagnostics
MakeForFunction(const Decl
*Func
) {
532 CheckFallThroughDiagnostics D
;
533 D
.FuncLoc
= Func
->getLocation();
534 D
.diag_MaybeFallThrough_HasNoReturn
=
535 diag::warn_falloff_noreturn_function
;
536 D
.diag_MaybeFallThrough_ReturnsNonVoid
=
537 diag::warn_maybe_falloff_nonvoid_function
;
538 D
.diag_AlwaysFallThrough_HasNoReturn
=
539 diag::warn_falloff_noreturn_function
;
540 D
.diag_AlwaysFallThrough_ReturnsNonVoid
=
541 diag::warn_falloff_nonvoid_function
;
543 // Don't suggest that virtual functions be marked "noreturn", since they
544 // might be overridden by non-noreturn functions.
545 bool isVirtualMethod
= false;
546 if (const CXXMethodDecl
*Method
= dyn_cast
<CXXMethodDecl
>(Func
))
547 isVirtualMethod
= Method
->isVirtual();
549 // Don't suggest that template instantiations be marked "noreturn"
550 bool isTemplateInstantiation
= false;
551 if (const FunctionDecl
*Function
= dyn_cast
<FunctionDecl
>(Func
))
552 isTemplateInstantiation
= Function
->isTemplateInstantiation();
554 if (!isVirtualMethod
&& !isTemplateInstantiation
)
555 D
.diag_NeverFallThroughOrReturn
=
556 diag::warn_suggest_noreturn_function
;
558 D
.diag_NeverFallThroughOrReturn
= 0;
560 D
.funMode
= Function
;
564 static CheckFallThroughDiagnostics
MakeForCoroutine(const Decl
*Func
) {
565 CheckFallThroughDiagnostics D
;
566 D
.FuncLoc
= Func
->getLocation();
567 D
.diag_MaybeFallThrough_HasNoReturn
= 0;
568 D
.diag_MaybeFallThrough_ReturnsNonVoid
=
569 diag::warn_maybe_falloff_nonvoid_coroutine
;
570 D
.diag_AlwaysFallThrough_HasNoReturn
= 0;
571 D
.diag_AlwaysFallThrough_ReturnsNonVoid
=
572 diag::warn_falloff_nonvoid_coroutine
;
573 D
.funMode
= Coroutine
;
577 static CheckFallThroughDiagnostics
MakeForBlock() {
578 CheckFallThroughDiagnostics D
;
579 D
.diag_MaybeFallThrough_HasNoReturn
=
580 diag::err_noreturn_block_has_return_expr
;
581 D
.diag_MaybeFallThrough_ReturnsNonVoid
=
582 diag::err_maybe_falloff_nonvoid_block
;
583 D
.diag_AlwaysFallThrough_HasNoReturn
=
584 diag::err_noreturn_block_has_return_expr
;
585 D
.diag_AlwaysFallThrough_ReturnsNonVoid
=
586 diag::err_falloff_nonvoid_block
;
587 D
.diag_NeverFallThroughOrReturn
= 0;
592 static CheckFallThroughDiagnostics
MakeForLambda() {
593 CheckFallThroughDiagnostics D
;
594 D
.diag_MaybeFallThrough_HasNoReturn
=
595 diag::err_noreturn_lambda_has_return_expr
;
596 D
.diag_MaybeFallThrough_ReturnsNonVoid
=
597 diag::warn_maybe_falloff_nonvoid_lambda
;
598 D
.diag_AlwaysFallThrough_HasNoReturn
=
599 diag::err_noreturn_lambda_has_return_expr
;
600 D
.diag_AlwaysFallThrough_ReturnsNonVoid
=
601 diag::warn_falloff_nonvoid_lambda
;
602 D
.diag_NeverFallThroughOrReturn
= 0;
607 bool checkDiagnostics(DiagnosticsEngine
&D
, bool ReturnsVoid
,
608 bool HasNoReturn
) const {
609 if (funMode
== Function
) {
610 return (ReturnsVoid
||
611 D
.isIgnored(diag::warn_maybe_falloff_nonvoid_function
,
614 D
.isIgnored(diag::warn_noreturn_function_has_return_expr
,
617 D
.isIgnored(diag::warn_suggest_noreturn_block
, FuncLoc
));
619 if (funMode
== Coroutine
) {
620 return (ReturnsVoid
||
621 D
.isIgnored(diag::warn_maybe_falloff_nonvoid_function
, FuncLoc
) ||
622 D
.isIgnored(diag::warn_maybe_falloff_nonvoid_coroutine
,
626 // For blocks / lambdas.
627 return ReturnsVoid
&& !HasNoReturn
;
631 } // anonymous namespace
633 /// CheckFallThroughForBody - Check that we don't fall off the end of a
634 /// function that should return a value. Check that we don't fall off the end
635 /// of a noreturn function. We assume that functions and blocks not marked
636 /// noreturn will return.
637 static void CheckFallThroughForBody(Sema
&S
, const Decl
*D
, const Stmt
*Body
,
639 const CheckFallThroughDiagnostics
&CD
,
640 AnalysisDeclContext
&AC
,
641 sema::FunctionScopeInfo
*FSI
) {
643 bool ReturnsVoid
= false;
644 bool HasNoReturn
= false;
645 bool IsCoroutine
= FSI
->isCoroutine();
647 if (const auto *FD
= dyn_cast
<FunctionDecl
>(D
)) {
648 if (const auto *CBody
= dyn_cast
<CoroutineBodyStmt
>(Body
))
649 ReturnsVoid
= CBody
->getFallthroughHandler() != nullptr;
651 ReturnsVoid
= FD
->getReturnType()->isVoidType();
652 HasNoReturn
= FD
->isNoReturn();
654 else if (const auto *MD
= dyn_cast
<ObjCMethodDecl
>(D
)) {
655 ReturnsVoid
= MD
->getReturnType()->isVoidType();
656 HasNoReturn
= MD
->hasAttr
<NoReturnAttr
>();
658 else if (isa
<BlockDecl
>(D
)) {
659 if (const FunctionType
*FT
=
660 BlockType
->getPointeeType()->getAs
<FunctionType
>()) {
661 if (FT
->getReturnType()->isVoidType())
663 if (FT
->getNoReturnAttr())
668 DiagnosticsEngine
&Diags
= S
.getDiagnostics();
670 // Short circuit for compilation speed.
671 if (CD
.checkDiagnostics(Diags
, ReturnsVoid
, HasNoReturn
))
673 SourceLocation LBrace
= Body
->getBeginLoc(), RBrace
= Body
->getEndLoc();
674 auto EmitDiag
= [&](SourceLocation Loc
, unsigned DiagID
) {
676 S
.Diag(Loc
, DiagID
) << FSI
->CoroutinePromise
->getType();
681 // cpu_dispatch functions permit empty function bodies for ICC compatibility.
682 if (D
->getAsFunction() && D
->getAsFunction()->isCPUDispatchMultiVersion())
685 // Either in a function body compound statement, or a function-try-block.
686 switch (CheckFallThrough(AC
)) {
687 case UnknownFallThrough
:
690 case MaybeFallThrough
:
692 EmitDiag(RBrace
, CD
.diag_MaybeFallThrough_HasNoReturn
);
693 else if (!ReturnsVoid
)
694 EmitDiag(RBrace
, CD
.diag_MaybeFallThrough_ReturnsNonVoid
);
696 case AlwaysFallThrough
:
698 EmitDiag(RBrace
, CD
.diag_AlwaysFallThrough_HasNoReturn
);
699 else if (!ReturnsVoid
)
700 EmitDiag(RBrace
, CD
.diag_AlwaysFallThrough_ReturnsNonVoid
);
702 case NeverFallThroughOrReturn
:
703 if (ReturnsVoid
&& !HasNoReturn
&& CD
.diag_NeverFallThroughOrReturn
) {
704 if (const FunctionDecl
*FD
= dyn_cast
<FunctionDecl
>(D
)) {
705 S
.Diag(LBrace
, CD
.diag_NeverFallThroughOrReturn
) << 0 << FD
;
706 } else if (const ObjCMethodDecl
*MD
= dyn_cast
<ObjCMethodDecl
>(D
)) {
707 S
.Diag(LBrace
, CD
.diag_NeverFallThroughOrReturn
) << 1 << MD
;
709 S
.Diag(LBrace
, CD
.diag_NeverFallThroughOrReturn
);
713 case NeverFallThrough
:
718 //===----------------------------------------------------------------------===//
720 //===----------------------------------------------------------------------===//
723 /// ContainsReference - A visitor class to search for references to
724 /// a particular declaration (the needle) within any evaluated component of an
725 /// expression (recursively).
726 class ContainsReference
: public ConstEvaluatedExprVisitor
<ContainsReference
> {
728 const DeclRefExpr
*Needle
;
731 typedef ConstEvaluatedExprVisitor
<ContainsReference
> Inherited
;
733 ContainsReference(ASTContext
&Context
, const DeclRefExpr
*Needle
)
734 : Inherited(Context
), FoundReference(false), Needle(Needle
) {}
736 void VisitExpr(const Expr
*E
) {
737 // Stop evaluating if we already have a reference.
741 Inherited::VisitExpr(E
);
744 void VisitDeclRefExpr(const DeclRefExpr
*E
) {
746 FoundReference
= true;
748 Inherited::VisitDeclRefExpr(E
);
751 bool doesContainReference() const { return FoundReference
; }
753 } // anonymous namespace
755 static bool SuggestInitializationFixit(Sema
&S
, const VarDecl
*VD
) {
756 QualType VariableTy
= VD
->getType().getCanonicalType();
757 if (VariableTy
->isBlockPointerType() &&
758 !VD
->hasAttr
<BlocksAttr
>()) {
759 S
.Diag(VD
->getLocation(), diag::note_block_var_fixit_add_initialization
)
761 << FixItHint::CreateInsertion(VD
->getLocation(), "__block ");
765 // Don't issue a fixit if there is already an initializer.
769 // Don't suggest a fixit inside macros.
770 if (VD
->getEndLoc().isMacroID())
773 SourceLocation Loc
= S
.getLocForEndOfToken(VD
->getEndLoc());
775 // Suggest possible initialization (if any).
776 std::string Init
= S
.getFixItZeroInitializerForType(VariableTy
, Loc
);
780 S
.Diag(Loc
, diag::note_var_fixit_add_initialization
) << VD
->getDeclName()
781 << FixItHint::CreateInsertion(Loc
, Init
);
785 /// Create a fixit to remove an if-like statement, on the assumption that its
786 /// condition is CondVal.
787 static void CreateIfFixit(Sema
&S
, const Stmt
*If
, const Stmt
*Then
,
788 const Stmt
*Else
, bool CondVal
,
789 FixItHint
&Fixit1
, FixItHint
&Fixit2
) {
791 // If condition is always true, remove all but the 'then'.
792 Fixit1
= FixItHint::CreateRemoval(
793 CharSourceRange::getCharRange(If
->getBeginLoc(), Then
->getBeginLoc()));
795 SourceLocation ElseKwLoc
= S
.getLocForEndOfToken(Then
->getEndLoc());
797 FixItHint::CreateRemoval(SourceRange(ElseKwLoc
, Else
->getEndLoc()));
800 // If condition is always false, remove all but the 'else'.
802 Fixit1
= FixItHint::CreateRemoval(CharSourceRange::getCharRange(
803 If
->getBeginLoc(), Else
->getBeginLoc()));
805 Fixit1
= FixItHint::CreateRemoval(If
->getSourceRange());
809 /// DiagUninitUse -- Helper function to produce a diagnostic for an
810 /// uninitialized use of a variable.
811 static void DiagUninitUse(Sema
&S
, const VarDecl
*VD
, const UninitUse
&Use
,
812 bool IsCapturedByBlock
) {
813 bool Diagnosed
= false;
815 switch (Use
.getKind()) {
816 case UninitUse::Always
:
817 S
.Diag(Use
.getUser()->getBeginLoc(), diag::warn_uninit_var
)
818 << VD
->getDeclName() << IsCapturedByBlock
819 << Use
.getUser()->getSourceRange();
822 case UninitUse::AfterDecl
:
823 case UninitUse::AfterCall
:
824 S
.Diag(VD
->getLocation(), diag::warn_sometimes_uninit_var
)
825 << VD
->getDeclName() << IsCapturedByBlock
826 << (Use
.getKind() == UninitUse::AfterDecl
? 4 : 5)
827 << const_cast<DeclContext
*>(VD
->getLexicalDeclContext())
828 << VD
->getSourceRange();
829 S
.Diag(Use
.getUser()->getBeginLoc(), diag::note_uninit_var_use
)
830 << IsCapturedByBlock
<< Use
.getUser()->getSourceRange();
833 case UninitUse::Maybe
:
834 case UninitUse::Sometimes
:
835 // Carry on to report sometimes-uninitialized branches, if possible,
836 // or a 'may be used uninitialized' diagnostic otherwise.
840 // Diagnose each branch which leads to a sometimes-uninitialized use.
841 for (UninitUse::branch_iterator I
= Use
.branch_begin(), E
= Use
.branch_end();
843 assert(Use
.getKind() == UninitUse::Sometimes
);
845 const Expr
*User
= Use
.getUser();
846 const Stmt
*Term
= I
->Terminator
;
848 // Information used when building the diagnostic.
853 // FixIts to suppress the diagnostic by removing the dead condition.
854 // For all binary terminators, branch 0 is taken if the condition is true,
855 // and branch 1 is taken if the condition is false.
856 int RemoveDiagKind
= -1;
857 const char *FixitStr
=
858 S
.getLangOpts().CPlusPlus
? (I
->Output
? "true" : "false")
859 : (I
->Output
? "1" : "0");
860 FixItHint Fixit1
, Fixit2
;
862 switch (Term
? Term
->getStmtClass() : Stmt::DeclStmtClass
) {
864 // Don't know how to report this. Just fall back to 'may be used
865 // uninitialized'. FIXME: Can this happen?
868 // "condition is true / condition is false".
869 case Stmt::IfStmtClass
: {
870 const IfStmt
*IS
= cast
<IfStmt
>(Term
);
873 Range
= IS
->getCond()->getSourceRange();
875 CreateIfFixit(S
, IS
, IS
->getThen(), IS
->getElse(),
876 I
->Output
, Fixit1
, Fixit2
);
879 case Stmt::ConditionalOperatorClass
: {
880 const ConditionalOperator
*CO
= cast
<ConditionalOperator
>(Term
);
883 Range
= CO
->getCond()->getSourceRange();
885 CreateIfFixit(S
, CO
, CO
->getTrueExpr(), CO
->getFalseExpr(),
886 I
->Output
, Fixit1
, Fixit2
);
889 case Stmt::BinaryOperatorClass
: {
890 const BinaryOperator
*BO
= cast
<BinaryOperator
>(Term
);
891 if (!BO
->isLogicalOp())
894 Str
= BO
->getOpcodeStr();
895 Range
= BO
->getLHS()->getSourceRange();
897 if ((BO
->getOpcode() == BO_LAnd
&& I
->Output
) ||
898 (BO
->getOpcode() == BO_LOr
&& !I
->Output
))
899 // true && y -> y, false || y -> y.
900 Fixit1
= FixItHint::CreateRemoval(
901 SourceRange(BO
->getBeginLoc(), BO
->getOperatorLoc()));
903 // false && y -> false, true || y -> true.
904 Fixit1
= FixItHint::CreateReplacement(BO
->getSourceRange(), FixitStr
);
908 // "loop is entered / loop is exited".
909 case Stmt::WhileStmtClass
:
912 Range
= cast
<WhileStmt
>(Term
)->getCond()->getSourceRange();
914 Fixit1
= FixItHint::CreateReplacement(Range
, FixitStr
);
916 case Stmt::ForStmtClass
:
919 Range
= cast
<ForStmt
>(Term
)->getCond()->getSourceRange();
922 Fixit1
= FixItHint::CreateRemoval(Range
);
924 Fixit1
= FixItHint::CreateReplacement(Range
, FixitStr
);
926 case Stmt::CXXForRangeStmtClass
:
927 if (I
->Output
== 1) {
928 // The use occurs if a range-based for loop's body never executes.
929 // That may be impossible, and there's no syntactic fix for this,
930 // so treat it as a 'may be uninitialized' case.
935 Range
= cast
<CXXForRangeStmt
>(Term
)->getRangeInit()->getSourceRange();
938 // "condition is true / loop is exited".
939 case Stmt::DoStmtClass
:
942 Range
= cast
<DoStmt
>(Term
)->getCond()->getSourceRange();
944 Fixit1
= FixItHint::CreateReplacement(Range
, FixitStr
);
947 // "switch case is taken".
948 case Stmt::CaseStmtClass
:
951 Range
= cast
<CaseStmt
>(Term
)->getLHS()->getSourceRange();
953 case Stmt::DefaultStmtClass
:
956 Range
= cast
<DefaultStmt
>(Term
)->getDefaultLoc();
960 S
.Diag(Range
.getBegin(), diag::warn_sometimes_uninit_var
)
961 << VD
->getDeclName() << IsCapturedByBlock
<< DiagKind
962 << Str
<< I
->Output
<< Range
;
963 S
.Diag(User
->getBeginLoc(), diag::note_uninit_var_use
)
964 << IsCapturedByBlock
<< User
->getSourceRange();
965 if (RemoveDiagKind
!= -1)
966 S
.Diag(Fixit1
.RemoveRange
.getBegin(), diag::note_uninit_fixit_remove_cond
)
967 << RemoveDiagKind
<< Str
<< I
->Output
<< Fixit1
<< Fixit2
;
973 S
.Diag(Use
.getUser()->getBeginLoc(), diag::warn_maybe_uninit_var
)
974 << VD
->getDeclName() << IsCapturedByBlock
975 << Use
.getUser()->getSourceRange();
978 /// Diagnose uninitialized const reference usages.
979 static bool DiagnoseUninitializedConstRefUse(Sema
&S
, const VarDecl
*VD
,
980 const UninitUse
&Use
) {
981 S
.Diag(Use
.getUser()->getBeginLoc(), diag::warn_uninit_const_reference
)
982 << VD
->getDeclName() << Use
.getUser()->getSourceRange();
/// DiagnoseUninitializedUse -- Helper function for diagnosing uses of an
/// uninitialized variable. This manages the different forms of diagnostic
/// emitted for particular types of uses. Returns true if the use was diagnosed
/// as a warning. If a particular use is one we omit warnings for, returns
/// false.
static bool DiagnoseUninitializedUse(Sema &S, const VarDecl *VD,
                                     const UninitUse &Use,
                                     bool alwaysReportSelfInit = false) {
  if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Use.getUser())) {
    // Inspect the initializer of the variable declaration which is
    // being referenced prior to its initialization. We emit
    // specialized diagnostics for self-initialization, and we
    // specifically avoid warning about self references which take the
    // form of:
    //
    //   int x = x;
    //
    // This is used to indicate to GCC that 'x' is intentionally left
    // uninitialized. Proven code paths which access 'x' in
    // an uninitialized state after this will still warn.
    if (const Expr *Initializer = VD->getInit()) {
      // The idiomatic direct self-init is deliberately not reported unless
      // the caller explicitly asked for it via alwaysReportSelfInit.
      if (!alwaysReportSelfInit && DRE == Initializer->IgnoreParenImpCasts())
        return false;

      // A reference to the variable buried deeper inside its own
      // initializer gets the specialized self-reference diagnostic.
      ContainsReference CR(S.Context, DRE);
      CR.Visit(Initializer);
      if (CR.doesContainReference()) {
        S.Diag(DRE->getBeginLoc(), diag::warn_uninit_self_reference_in_init)
            << VD->getDeclName() << VD->getLocation() << DRE->getSourceRange();
        return true;
      }
    }

    DiagUninitUse(S, VD, Use, false);
  } else {
    // The only other use kind handled here is a capture by a block.
    const BlockExpr *BE = cast<BlockExpr>(Use.getUser());
    if (VD->getType()->isBlockPointerType() && !VD->hasAttr<BlocksAttr>())
      S.Diag(BE->getBeginLoc(),
             diag::warn_uninit_byref_blockvar_captured_by_block)
          << VD->getDeclName()
          << VD->getType().getQualifiers().hasObjCLifetime();
    else
      DiagUninitUse(S, VD, Use, true);
  }

  // Report where the variable was declared when the use wasn't within
  // the initializer of that declaration & we didn't already suggest
  // an initialization fixit.
  if (!SuggestInitializationFixit(S, VD))
    S.Diag(VD->getBeginLoc(), diag::note_var_declared_here)
        << VD->getDeclName();

  return true;
}
/// Collects [[fallthrough]]-style annotations in a function body and checks,
/// per switch-case CFG block, whether falling into the block is annotated.
class FallthroughMapper : public RecursiveASTVisitor<FallthroughMapper> {
public:
  FallthroughMapper(Sema &S)
      : FoundSwitchStatements(false),
        S(S) {
  }

  bool foundSwitchStatements() const { return FoundSwitchStatements; }

  // Remove a fallthrough statement from the pending set once it has been
  // matched to a fall-through edge; any statements left over at the end are
  // invalidly placed.
  void markFallthroughVisited(const AttributedStmt *Stmt) {
    bool Found = FallthroughStmts.erase(Stmt);
    assert(Found);
    (void)Found;
  }

  typedef llvm::SmallPtrSet<const AttributedStmt *, 8> AttrStmts;

  const AttrStmts &getFallthroughStmts() const {
    return FallthroughStmts;
  }

  // Compute the set of CFG blocks reachable from the entry (plus all case
  // blocks, see below) so unreachable fallthrough annotations can be
  // reported separately.
  void fillReachableBlocks(CFG *Cfg) {
    assert(ReachableBlocks.empty() && "ReachableBlocks already filled");
    std::deque<const CFGBlock *> BlockQueue;

    ReachableBlocks.insert(&Cfg->getEntry());
    BlockQueue.push_back(&Cfg->getEntry());
    // Mark all case blocks reachable to avoid problems with switching on
    // constants, covered enums, etc.
    // These blocks can contain fall-through annotations, and we don't want to
    // issue a warn_fallthrough_attr_unreachable for them.
    for (const auto *B : *Cfg) {
      const Stmt *L = B->getLabel();
      if (L && isa<SwitchCase>(L) && ReachableBlocks.insert(B).second)
        BlockQueue.push_back(B);
    }

    // Standard BFS over successor edges.
    while (!BlockQueue.empty()) {
      const CFGBlock *P = BlockQueue.front();
      BlockQueue.pop_front();
      for (const CFGBlock *B : P->succs()) {
        if (B && ReachableBlocks.insert(B).second)
          BlockQueue.push_back(B);
      }
    }
  }

  // Returns true if any predecessor falls into block B without an
  // annotation; AnnotatedCnt receives the number of annotated fall-through
  // edges found.
  bool checkFallThroughIntoBlock(const CFGBlock &B, int &AnnotatedCnt,
                                 bool IsTemplateInstantiation) {
    assert(!ReachableBlocks.empty() && "ReachableBlocks empty");

    int UnannotatedCnt = 0;
    AnnotatedCnt = 0;

    std::deque<const CFGBlock *> BlockQueue(B.pred_begin(), B.pred_end());
    while (!BlockQueue.empty()) {
      const CFGBlock *P = BlockQueue.front();
      BlockQueue.pop_front();
      if (!P) continue;

      const Stmt *Term = P->getTerminatorStmt();
      if (Term && isa<SwitchStmt>(Term))
        continue; // Switch statement, good.

      const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(P->getLabel());
      if (SW && SW->getSubStmt() == B.getLabel() && P->begin() == P->end())
        continue; // Previous case label has no statements, good.

      const LabelStmt *L = dyn_cast_or_null<LabelStmt>(P->getLabel());
      if (L && L->getSubStmt() == B.getLabel() && P->begin() == P->end())
        continue; // Case label is preceded with a normal label, good.

      if (!ReachableBlocks.count(P)) {
        for (const CFGElement &Elem : llvm::reverse(*P)) {
          if (Optional<CFGStmt> CS = Elem.getAs<CFGStmt>()) {
            if (const AttributedStmt *AS = asFallThroughAttr(CS->getStmt())) {
              // Don't issue a warning for an unreachable fallthrough
              // attribute in template instantiations as it may not be
              // unreachable in all instantiations of the template.
              if (!IsTemplateInstantiation)
                S.Diag(AS->getBeginLoc(),
                       diag::warn_unreachable_fallthrough_attr);
              markFallthroughVisited(AS);
              ++AnnotatedCnt;
              break;
            }
            // Don't care about other unreachable statements.
          }
        }
        // If there are no unreachable statements, this may be a special
        // case in CFG:
        // case X: {
        //    A a;  // A has a destructor.
        //    break;
        // }
        // // <<<< This place is represented by a 'hanging' CFG block.
        // case Y:
        continue;
      }

      const Stmt *LastStmt = getLastStmt(*P);
      if (const AttributedStmt *AS = asFallThroughAttr(LastStmt)) {
        markFallthroughVisited(AS);
        ++AnnotatedCnt;
        continue; // Fallthrough annotation, good.
      }

      if (!LastStmt) { // This block contains no executable statements.
        // Traverse its predecessors.
        std::copy(P->pred_begin(), P->pred_end(),
                  std::back_inserter(BlockQueue));
        continue;
      }

      ++UnannotatedCnt;
    }
    return !!UnannotatedCnt;
  }

  // RecursiveASTVisitor setup.
  bool shouldWalkTypesOfTypeLocs() const { return false; }

  bool VisitAttributedStmt(AttributedStmt *S) {
    if (asFallThroughAttr(S))
      FallthroughStmts.insert(S);
    return true;
  }

  bool VisitSwitchStmt(SwitchStmt *S) {
    FoundSwitchStatements = true;
    return true;
  }

  // We don't want to traverse local type declarations. We analyze their
  // methods separately.
  bool TraverseDecl(Decl *D) { return true; }

  // We analyze lambda bodies separately. Skip them here.
  bool TraverseLambdaExpr(LambdaExpr *LE) {
    // Traverse the captures, but not the body.
    for (const auto C : zip(LE->captures(), LE->capture_inits()))
      TraverseLambdaCapture(LE, &std::get<0>(C), std::get<1>(C));
    return true;
  }

private:
  // Returns S as an AttributedStmt if it carries a [[fallthrough]] attribute,
  // null otherwise.
  static const AttributedStmt *asFallThroughAttr(const Stmt *S) {
    if (const AttributedStmt *AS = dyn_cast_or_null<AttributedStmt>(S)) {
      if (hasSpecificAttr<FallThroughAttr>(AS->getAttrs()))
        return AS;
    }
    return nullptr;
  }

  // Returns the last statement executed in block B: the terminator if any,
  // otherwise the last CFGStmt element, otherwise the sub-statement of a
  // case label (see workaround below).
  static const Stmt *getLastStmt(const CFGBlock &B) {
    if (const Stmt *Term = B.getTerminatorStmt())
      return Term;
    for (const CFGElement &Elem : llvm::reverse(B))
      if (Optional<CFGStmt> CS = Elem.getAs<CFGStmt>())
        return CS->getStmt();
    // Workaround to detect a statement thrown out by CFGBuilder:
    //   case X: {} case Y:
    //   case X: ; case Y:
    if (const SwitchCase *SW = dyn_cast_or_null<SwitchCase>(B.getLabel()))
      if (!isa<SwitchCase>(SW->getSubStmt()))
        return SW->getSubStmt();

    return nullptr;
  }

  bool FoundSwitchStatements;          // Any switch seen during traversal.
  AttrStmts FallthroughStmts;          // Annotations not yet matched to edges.
  Sema &S;
  llvm::SmallPtrSet<const CFGBlock *, 16> ReachableBlocks;
};
1218 } // anonymous namespace
/// Pick the spelling to suggest for a fallthrough annotation at Loc.
///
/// Prefers a user macro whose expansion matches [[clang::fallthrough]] or
/// [[fallthrough]] (searched in an order depending on the language mode);
/// falls back to the raw attribute spelling appropriate for the language.
static StringRef getFallthroughAttrSpelling(Preprocessor &PP,
                                            SourceLocation Loc) {
  TokenValue FallthroughTokens[] = {
    tok::l_square, tok::l_square,
    PP.getIdentifierInfo("fallthrough"),
    tok::r_square, tok::r_square
  };

  TokenValue ClangFallthroughTokens[] = {
    tok::l_square, tok::l_square, PP.getIdentifierInfo("clang"),
    tok::coloncolon, PP.getIdentifierInfo("fallthrough"),
    tok::r_square, tok::r_square
  };

  // Before C++17 / C2x the standard attribute is unavailable, so the
  // clang-namespaced spelling is preferred.
  bool PreferClangAttr = !PP.getLangOpts().CPlusPlus17 && !PP.getLangOpts().C2x;

  StringRef MacroName;
  if (PreferClangAttr)
    MacroName = PP.getLastMacroWithSpelling(Loc, ClangFallthroughTokens);
  if (MacroName.empty())
    MacroName = PP.getLastMacroWithSpelling(Loc, FallthroughTokens);
  if (MacroName.empty() && !PreferClangAttr)
    MacroName = PP.getLastMacroWithSpelling(Loc, ClangFallthroughTokens);
  if (MacroName.empty()) {
    if (!PreferClangAttr)
      MacroName = "[[fallthrough]]";
    else if (PP.getLangOpts().CPlusPlus)
      MacroName = "[[clang::fallthrough]]";
    else
      MacroName = "__attribute__((fallthrough))";
  }
  return MacroName;
}
/// Diagnose unannotated fall-through between switch labels, and report
/// fallthrough annotations that are not placed on a fall-through edge.
/// When PerFunction is set, only functions that already use fallthrough
/// annotations are checked (the per-function variant of the warning).
static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC,
                                            bool PerFunction) {
  FallthroughMapper FM(S);
  FM.TraverseStmt(AC.getBody());

  if (!FM.foundSwitchStatements())
    return;

  if (PerFunction && FM.getFallthroughStmts().empty())
    return;

  CFG *Cfg = AC.getCFG();

  if (!Cfg)
    return;

  FM.fillReachableBlocks(Cfg);

  for (const CFGBlock *B : llvm::reverse(*Cfg)) {
    const Stmt *Label = B->getLabel();

    // Only blocks that start with a case/default label can be fallen into.
    if (!isa_and_nonnull<SwitchCase>(Label))
      continue;

    int AnnotatedCnt;

    bool IsTemplateInstantiation = false;
    if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(AC.getDecl()))
      IsTemplateInstantiation = Function->isTemplateInstantiation();
    if (!FM.checkFallThroughIntoBlock(*B, AnnotatedCnt,
                                      IsTemplateInstantiation))
      continue;

    S.Diag(Label->getBeginLoc(),
           PerFunction ? diag::warn_unannotated_fallthrough_per_function
                       : diag::warn_unannotated_fallthrough);

    // Offer fixits only when no annotated edge exists and the label is not
    // inside a macro expansion.
    if (!AnnotatedCnt) {
      SourceLocation L = Label->getBeginLoc();
      if (L.isMacroID())
        continue;

      const Stmt *Term = B->getTerminatorStmt();
      // Skip empty cases.
      while (B->empty() && !Term && B->succ_size() == 1) {
        B = *B->succ_begin();
        Term = B->getTerminatorStmt();
      }
      if (!(B->empty() && Term && isa<BreakStmt>(Term))) {
        Preprocessor &PP = S.getPreprocessor();
        StringRef AnnotationSpelling = getFallthroughAttrSpelling(PP, L);
        SmallString<64> TextToInsert(AnnotationSpelling);
        TextToInsert += "; ";
        S.Diag(L, diag::note_insert_fallthrough_fixit)
            << AnnotationSpelling
            << FixItHint::CreateInsertion(L, TextToInsert);
      }
      S.Diag(L, diag::note_insert_break_fixit)
          << FixItHint::CreateInsertion(L, "break; ");
    }
  }

  // Any annotation never matched to a fall-through edge is misplaced.
  for (const auto *F : FM.getFallthroughStmts())
    S.Diag(F->getBeginLoc(), diag::err_fallthrough_attr_invalid_placement);
}
/// Returns true if statement S is lexically enclosed in a loop, walking up
/// through its parents via PM. A do-while whose condition constant-folds to
/// false counts as "not a loop" (it executes exactly once).
static bool isInLoop(const ASTContext &Ctx, const ParentMap &PM,
                     const Stmt *S) {
  assert(S);

  do {
    switch (S->getStmtClass()) {
    case Stmt::ForStmtClass:
    case Stmt::WhileStmtClass:
    case Stmt::CXXForRangeStmtClass:
    case Stmt::ObjCForCollectionStmtClass:
      return true;
    case Stmt::DoStmtClass: {
      // do { ... } while (0) is a common macro idiom and iterates once;
      // treat it as a loop only if the condition can be (or may be) true.
      Expr::EvalResult Result;
      if (!cast<DoStmt>(S)->getCond()->EvaluateAsInt(Result, Ctx))
        return true;
      return Result.Val.getInt().getBoolValue();
    }
    default:
      break;
    }
  } while ((S = PM.getParent(S)));

  return false;
}
/// Diagnose repeated reads of ARC __weak objects within one function body,
/// where the object could be deallocated between reads
/// (-Warc-repeated-use-of-weak).
static void diagnoseRepeatedUseOfWeak(Sema &S,
                                      const sema::FunctionScopeInfo *CurFn,
                                      const Decl *D,
                                      const ParentMap &PM) {
  typedef sema::FunctionScopeInfo::WeakObjectProfileTy WeakObjectProfileTy;
  typedef sema::FunctionScopeInfo::WeakObjectUseMap WeakObjectUseMap;
  typedef sema::FunctionScopeInfo::WeakUseVector WeakUseVector;
  typedef std::pair<const Stmt *, WeakObjectUseMap::const_iterator>
  StmtUsesPair;

  ASTContext &Ctx = S.getASTContext();

  const WeakObjectUseMap &WeakMap = CurFn->getWeakObjectUses();

  // Extract all weak objects that are referenced more than once.
  SmallVector<StmtUsesPair, 8> UsesByStmt;
  for (WeakObjectUseMap::const_iterator I = WeakMap.begin(), E = WeakMap.end();
       I != E; ++I) {
    const WeakUseVector &Uses = I->second;

    // Find the first read of the weak object.
    WeakUseVector::const_iterator UI = Uses.begin(), UE = Uses.end();
    for ( ; UI != UE; ++UI) {
      if (UI->isUnsafe())
        break;
    }

    // If there were only writes to this object, don't warn.
    if (UI == UE)
      continue;

    // If there was only one read, followed by any number of writes, and the
    // read is not within a loop, don't warn. Additionally, don't warn in a
    // loop if the base object is a local variable -- local variables are often
    // changed in loops.
    if (UI == Uses.begin()) {
      WeakUseVector::const_iterator UI2 = UI;
      for (++UI2; UI2 != UE; ++UI2)
        if (UI2->isUnsafe())
          break;

      if (UI2 == UE) {
        if (!isInLoop(Ctx, PM, UI->getUseExpr()))
          continue;

        const WeakObjectProfileTy &Profile = I->first;
        if (!Profile.isExactProfile())
          continue;

        const NamedDecl *Base = Profile.getBase();
        if (!Base)
          Base = Profile.getProperty();
        assert(Base && "A profile always has a base or property.");

        if (const VarDecl *BaseVar = dyn_cast<VarDecl>(Base))
          if (BaseVar->hasLocalStorage() && !isa<ParmVarDecl>(Base))
            continue;
      }
    }

    UsesByStmt.push_back(StmtUsesPair(UI->getUseExpr(), I));
  }

  if (UsesByStmt.empty())
    return;

  // Sort by first use so that we emit the warnings in a deterministic order.
  SourceManager &SM = S.getSourceManager();
  llvm::sort(UsesByStmt,
             [&SM](const StmtUsesPair &LHS, const StmtUsesPair &RHS) {
               return SM.isBeforeInTranslationUnit(LHS.first->getBeginLoc(),
                                                   RHS.first->getBeginLoc());
             });

  // Classify the current code body for better warning text.
  // This enum should stay in sync with the cases in
  // warn_arc_repeated_use_of_weak and warn_arc_possible_repeated_use_of_weak.
  // FIXME: Should we use a common classification enum and the same set of
  // possibilities all throughout Sema?
  enum {
    Function,
    Method,
    Block,
    Lambda
  } FunctionKind;

  if (isa<sema::BlockScopeInfo>(CurFn))
    FunctionKind = Block;
  else if (isa<sema::LambdaScopeInfo>(CurFn))
    FunctionKind = Lambda;
  else if (isa<ObjCMethodDecl>(D))
    FunctionKind = Method;
  else
    FunctionKind = Function;

  // Iterate through the sorted problems and emit warnings for each.
  for (const auto &P : UsesByStmt) {
    const Stmt *FirstRead = P.first;
    const WeakObjectProfileTy &Key = P.second->first;
    const WeakUseVector &Uses = P.second->second;

    // For complicated expressions like 'a.b.c' and 'x.b.c', WeakObjectProfileTy
    // may not contain enough information to determine that these are different
    // properties. We can only be 100% sure of a repeated use in certain cases,
    // and we adjust the diagnostic kind accordingly so that the less certain
    // case can be turned off if it is too noisy.
    unsigned DiagKind;
    if (Key.isExactProfile())
      DiagKind = diag::warn_arc_repeated_use_of_weak;
    else
      DiagKind = diag::warn_arc_possible_repeated_use_of_weak;

    // Classify the weak object being accessed for better warning text.
    // This enum should stay in sync with the cases in
    // warn_arc_repeated_use_of_weak and warn_arc_possible_repeated_use_of_weak.
    enum {
      Variable,
      Property,
      ImplicitProperty,
      Ivar
    } ObjectKind;

    const NamedDecl *KeyProp = Key.getProperty();
    if (isa<VarDecl>(KeyProp))
      ObjectKind = Variable;
    else if (isa<ObjCPropertyDecl>(KeyProp))
      ObjectKind = Property;
    else if (isa<ObjCMethodDecl>(KeyProp))
      ObjectKind = ImplicitProperty;
    else if (isa<ObjCIvarDecl>(KeyProp))
      ObjectKind = Ivar;
    else
      llvm_unreachable("Unexpected weak object kind!");

    // Do not warn about IBOutlet weak property receivers being set to null
    // since they are typically only used from the main thread.
    if (const ObjCPropertyDecl *Prop = dyn_cast<ObjCPropertyDecl>(KeyProp))
      if (Prop->hasAttr<IBOutletAttr>())
        continue;

    // Show the first time the object was read.
    S.Diag(FirstRead->getBeginLoc(), DiagKind)
        << int(ObjectKind) << KeyProp << int(FunctionKind)
        << FirstRead->getSourceRange();

    // Print all the other accesses as notes.
    for (const auto &Use : Uses) {
      if (Use.getUseExpr() == FirstRead)
        continue;
      S.Diag(Use.getUseExpr()->getBeginLoc(),
             diag::note_arc_weak_also_accessed_here)
          << Use.getUseExpr()->getSourceRange();
    }
  }
}
// Buffered-diagnostic plumbing shared by the thread-safety and consumed
// analyses below: a primary diagnostic plus optional notes, collected in a
// list so they can be sorted by source location before emission.
typedef SmallVector<PartialDiagnosticAt, 1> OptionalNotes;
typedef std::pair<PartialDiagnosticAt, OptionalNotes> DelayedDiag;
typedef std::list<DelayedDiag> DiagList;
// Comparator ordering DelayedDiags by the source location of their primary
// diagnostic, for deterministic emission order.
struct SortDiagBySourceLocation {
  SourceManager &SM;
  SortDiagBySourceLocation(SourceManager &SM) : SM(SM) {}

  bool operator()(const DelayedDiag &left, const DelayedDiag &right) {
    // Although this call will be slow, this is only called when outputting
    // multiple warnings.
    return SM.isBeforeInTranslationUnit(left.first.first, right.first.first);
  }
};
1517 } // anonymous namespace
1518 } // namespace clang
/// Accumulates uses of (possibly) uninitialized variables reported by the
/// UninitializedValues analysis, then emits sorted diagnostics when flushed
/// (or on destruction).
class UninitValsDiagReporter : public UninitVariablesHandler {
  Sema &S;
  typedef SmallVector<UninitUse, 2> UsesVec;
  // Pointer = vector of uses; Int = "variable has an idiomatic self-init".
  typedef llvm::PointerIntPair<UsesVec *, 1, bool> MappedType;
  // Prefer using MapVector to DenseMap, so that iteration order will be
  // the same as insertion order. This is needed to obtain a deterministic
  // order of diagnostics when calling flushDiagnostics().
  typedef llvm::MapVector<const VarDecl *, MappedType> UsesMap;
  UsesMap uses;
  UsesMap constRefUses;

public:
  UninitValsDiagReporter(Sema &S) : S(S) {}
  ~UninitValsDiagReporter() override { flushDiagnostics(); }

  // Get (lazily creating) the uses entry for vd in the given map.
  MappedType &getUses(UsesMap &um, const VarDecl *vd) {
    MappedType &V = um[vd];
    if (!V.getPointer())
      V.setPointer(new UsesVec());
    return V;
  }

  void handleUseOfUninitVariable(const VarDecl *vd,
                                 const UninitUse &use) override {
    getUses(uses, vd).getPointer()->push_back(use);
  }

  void handleConstRefUseOfUninitVariable(const VarDecl *vd,
                                         const UninitUse &use) override {
    getUses(constRefUses, vd).getPointer()->push_back(use);
  }

  void handleSelfInit(const VarDecl *vd) override {
    getUses(uses, vd).setInt(true);
    getUses(constRefUses, vd).setInt(true);
  }

  void flushDiagnostics() {
    for (const auto &P : uses) {
      const VarDecl *vd = P.first;
      const MappedType &V = P.second;

      UsesVec *vec = V.getPointer();
      bool hasSelfInit = V.getInt();

      // Specially handle the case where we have uses of an uninitialized
      // variable, but the root cause is an idiomatic self-init. We want
      // to report the diagnostic at the self-init since that is the root cause.
      if (!vec->empty() && hasSelfInit && hasAlwaysUninitializedUse(vec))
        DiagnoseUninitializedUse(S, vd,
                                 UninitUse(vd->getInit()->IgnoreParenCasts(),
                                           /* isAlwaysUninit */ true),
                                 /* alwaysReportSelfInit */ true);
      else {
        // Sort the uses by their SourceLocations. While not strictly
        // guaranteed to produce them in line/column order, this will provide
        // a stable ordering.
        llvm::sort(*vec, [](const UninitUse &a, const UninitUse &b) {
          // Prefer a more confident report over a less confident one.
          if (a.getKind() != b.getKind())
            return a.getKind() > b.getKind();
          return a.getUser()->getBeginLoc() < b.getUser()->getBeginLoc();
        });

        for (const auto &U : *vec) {
          // If we have self-init, downgrade all uses to 'may be uninitialized'.
          UninitUse Use = hasSelfInit ? UninitUse(U.getUser(), false) : U;

          if (DiagnoseUninitializedUse(S, vd, Use))
            // Skip further diagnostics for this variable. We try to warn only
            // on the first point at which a variable is used uninitialized.
            break;
        }
      }

      // Release the uses vector.
      delete vec;
    }

    uses.clear();

    // Flush all const reference uses diags.
    for (const auto &P : constRefUses) {
      const VarDecl *vd = P.first;
      const MappedType &V = P.second;

      UsesVec *vec = V.getPointer();
      bool hasSelfInit = V.getInt();

      if (!vec->empty() && hasSelfInit && hasAlwaysUninitializedUse(vec))
        DiagnoseUninitializedUse(S, vd,
                                 UninitUse(vd->getInit()->IgnoreParenCasts(),
                                           /* isAlwaysUninit */ true),
                                 /* alwaysReportSelfInit */ true);
      else {
        for (const auto &U : *vec) {
          if (DiagnoseUninitializedConstRefUse(S, vd, U))
            break;
        }
      }

      // Release the uses vector.
      delete vec;
    }

    constRefUses.clear();
  }

private:
  // True if any use in vec is definitely (not merely possibly)
  // uninitialized.
  static bool hasAlwaysUninitializedUse(const UsesVec *vec) {
    return llvm::any_of(*vec, [](const UninitUse &U) {
      return U.getKind() == UninitUse::Always ||
             U.getKind() == UninitUse::AfterCall ||
             U.getKind() == UninitUse::AfterDecl;
    });
  }
};
/// Inter-procedural data for the called-once checker.
///
/// Warnings about blocks are delayed here until we know whether the block is
/// guaranteed to be called exactly once (flush) or not (discard).
class CalledOnceInterProceduralData {
public:
  // Add the delayed warning for the given block.
  void addDelayedWarning(const BlockDecl *Block,
                         PartialDiagnosticAt &&Warning) {
    DelayedBlockWarnings[Block].emplace_back(std::move(Warning));
  }
  // Report all of the warnings we've gathered for the given block.
  void flushWarnings(const BlockDecl *Block, Sema &S) {
    for (const PartialDiagnosticAt &Delayed : DelayedBlockWarnings[Block])
      S.Diag(Delayed.first, Delayed.second);

    discardWarnings(Block);
  }
  // Discard all of the warnings we've gathered for the given block.
  void discardWarnings(const BlockDecl *Block) {
    DelayedBlockWarnings.erase(Block);
  }

private:
  using DelayedDiagnostics = SmallVector<PartialDiagnosticAt, 2>;
  llvm::DenseMap<const BlockDecl *, DelayedDiagnostics> DelayedBlockWarnings;
};
/// Translates events from the called-once analysis into Sema diagnostics.
/// Completion-handler parameters get their own diagnostic IDs; warnings on
/// blocks are routed through CalledOnceInterProceduralData so they can be
/// delayed until the block's call guarantee is known.
class CalledOnceCheckReporter : public CalledOnceCheckHandler {
public:
  CalledOnceCheckReporter(Sema &S, CalledOnceInterProceduralData &Data)
      : S(S), Data(Data) {}

  void handleDoubleCall(const ParmVarDecl *Parameter, const Expr *Call,
                        const Expr *PrevCall, bool IsCompletionHandler,
                        bool Poised) override {
    auto DiagToReport = IsCompletionHandler
                            ? diag::warn_completion_handler_called_twice
                            : diag::warn_called_once_gets_called_twice;
    S.Diag(Call->getBeginLoc(), DiagToReport) << Parameter;
    S.Diag(PrevCall->getBeginLoc(), diag::note_called_once_gets_called_twice)
        << Poised;
  }

  void handleNeverCalled(const ParmVarDecl *Parameter,
                         bool IsCompletionHandler) override {
    auto DiagToReport = IsCompletionHandler
                            ? diag::warn_completion_handler_never_called
                            : diag::warn_called_once_never_called;
    S.Diag(Parameter->getBeginLoc(), DiagToReport)
        << Parameter << /* Captured */ false;
  }

  void handleNeverCalled(const ParmVarDecl *Parameter, const Decl *Function,
                         const Stmt *Where, NeverCalledReason Reason,
                         bool IsCalledDirectly,
                         bool IsCompletionHandler) override {
    auto DiagToReport = IsCompletionHandler
                            ? diag::warn_completion_handler_never_called_when
                            : diag::warn_called_once_never_called_when;
    PartialDiagnosticAt Warning(Where->getBeginLoc(), S.PDiag(DiagToReport)
                                                          << Parameter
                                                          << IsCalledDirectly
                                                          << (unsigned)Reason);

    if (const auto *Block = dyn_cast<BlockDecl>(Function)) {
      // We shouldn't report these warnings on blocks immediately
      Data.addDelayedWarning(Block, std::move(Warning));
    } else {
      S.Diag(Warning.first, Warning.second);
    }
  }

  void handleCapturedNeverCalled(const ParmVarDecl *Parameter,
                                 const Decl *Where,
                                 bool IsCompletionHandler) override {
    auto DiagToReport = IsCompletionHandler
                            ? diag::warn_completion_handler_never_called
                            : diag::warn_called_once_never_called;
    S.Diag(Where->getBeginLoc(), DiagToReport)
        << Parameter << /* Captured */ true;
  }

  void
  handleBlockThatIsGuaranteedToBeCalledOnce(const BlockDecl *Block) override {
    Data.flushWarnings(Block, S);
  }

  void handleBlockWithNoGuarantees(const BlockDecl *Block) override {
    Data.discardWarnings(Block);
  }

private:
  Sema &S;
  CalledOnceInterProceduralData &Data;
};
// Diagnostic IDs consulted to decide whether the called-once analysis needs
// to run at all (see shouldAnalyzeCalledOnce* below).
constexpr unsigned CalledOnceWarnings[] = {
    diag::warn_called_once_never_called,
    diag::warn_called_once_never_called_when,
    diag::warn_called_once_gets_called_twice};

constexpr unsigned CompletionHandlerWarnings[]{
    diag::warn_completion_handler_never_called,
    diag::warn_completion_handler_never_called_when,
    diag::warn_completion_handler_called_twice};
// True if at least one of the given diagnostics is enabled at location At.
bool shouldAnalyzeCalledOnceImpl(llvm::ArrayRef<unsigned> DiagIDs,
                                 const DiagnosticsEngine &Diags,
                                 SourceLocation At) {
  return llvm::any_of(DiagIDs, [&Diags, At](unsigned DiagID) {
    return !Diags.isIgnored(DiagID, At);
  });
}
// True if any completion-handler-convention warning is enabled at At.
bool shouldAnalyzeCalledOnceConventions(const DiagnosticsEngine &Diags,
                                        SourceLocation At) {
  return shouldAnalyzeCalledOnceImpl(CompletionHandlerWarnings, Diags, At);
}
// True if any called-once or completion-handler warning is enabled at At,
// i.e. whether the called-once analysis should run for this function.
bool shouldAnalyzeCalledOnceParameters(const DiagnosticsEngine &Diags,
                                       SourceLocation At) {
  return shouldAnalyzeCalledOnceImpl(CalledOnceWarnings, Diags, At) ||
         shouldAnalyzeCalledOnceConventions(Diags, At);
}
1760 } // anonymous namespace
1762 //===----------------------------------------------------------------------===//
1764 //===----------------------------------------------------------------------===//
1766 namespace threadSafety
{
/// Handler for thread-safety analysis events: buffers each warning (with its
/// notes) in Warnings and emits them all, sorted by source location, in
/// emitDiagnostics(). Invalid event locations fall back to FunLocation /
/// FunEndLocation.
class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
  Sema &S;
  DiagList Warnings;
  SourceLocation FunLocation, FunEndLocation;

  const FunctionDecl *CurrentFunction;
  bool Verbose;

  // In verbose mode, attach a "within function F" note to each diagnostic.
  OptionalNotes getNotes() const {
    if (Verbose && CurrentFunction) {
      PartialDiagnosticAt FNote(CurrentFunction->getBody()->getBeginLoc(),
                                S.PDiag(diag::note_thread_warning_in_fun)
                                    << CurrentFunction);
      return OptionalNotes(1, FNote);
    }
    return OptionalNotes();
  }

  OptionalNotes getNotes(const PartialDiagnosticAt &Note) const {
    OptionalNotes ONS(1, Note);
    if (Verbose && CurrentFunction) {
      PartialDiagnosticAt FNote(CurrentFunction->getBody()->getBeginLoc(),
                                S.PDiag(diag::note_thread_warning_in_fun)
                                    << CurrentFunction);
      ONS.push_back(std::move(FNote));
    }
    return ONS;
  }

  OptionalNotes getNotes(const PartialDiagnosticAt &Note1,
                         const PartialDiagnosticAt &Note2) const {
    OptionalNotes ONS;
    ONS.push_back(Note1);
    ONS.push_back(Note2);
    if (Verbose && CurrentFunction) {
      PartialDiagnosticAt FNote(CurrentFunction->getBody()->getBeginLoc(),
                                S.PDiag(diag::note_thread_warning_in_fun)
                                    << CurrentFunction);
      ONS.push_back(std::move(FNote));
    }
    return ONS;
  }

  // "Mutex acquired here" note, when the acquisition site is known.
  OptionalNotes makeLockedHereNote(SourceLocation LocLocked, StringRef Kind) {
    return LocLocked.isValid()
               ? getNotes(PartialDiagnosticAt(
                     LocLocked, S.PDiag(diag::note_locked_here) << Kind))
               : getNotes();
  }

  // "Mutex released here" note, when the release site is known.
  OptionalNotes makeUnlockedHereNote(SourceLocation LocUnlocked,
                                     StringRef Kind) {
    return LocUnlocked.isValid()
               ? getNotes(PartialDiagnosticAt(
                     LocUnlocked, S.PDiag(diag::note_unlocked_here) << Kind))
               : getNotes();
  }

public:
  ThreadSafetyReporter(Sema &S, SourceLocation FL, SourceLocation FEL)
      : S(S), FunLocation(FL), FunEndLocation(FEL),
        CurrentFunction(nullptr), Verbose(false) {}

  void setVerbose(bool b) { Verbose = b; }

  /// Emit all buffered diagnostics in order of sourcelocation.
  /// We need to output diagnostics produced while iterating through
  /// the lockset in deterministic order, so this function orders diagnostics
  /// and outputs them.
  void emitDiagnostics() {
    Warnings.sort(SortDiagBySourceLocation(S.getSourceManager()));
    for (const auto &Diag : Warnings) {
      S.Diag(Diag.first.first, Diag.first.second);
      for (const auto &Note : Diag.second)
        S.Diag(Note.first, Note.second);
    }
  }

  void handleInvalidLockExp(SourceLocation Loc) override {
    PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_cannot_resolve_lock)
                                         << Loc);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void handleUnmatchedUnlock(StringRef Kind, Name LockName, SourceLocation Loc,
                             SourceLocation LocPreviousUnlock) override {
    if (Loc.isInvalid())
      Loc = FunLocation;
    PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_unlock_but_no_lock)
                                         << Kind << LockName);
    Warnings.emplace_back(std::move(Warning),
                          makeUnlockedHereNote(LocPreviousUnlock, Kind));
  }

  void handleIncorrectUnlockKind(StringRef Kind, Name LockName,
                                 LockKind Expected, LockKind Received,
                                 SourceLocation LocLocked,
                                 SourceLocation LocUnlock) override {
    if (LocUnlock.isInvalid())
      LocUnlock = FunLocation;
    PartialDiagnosticAt Warning(
        LocUnlock, S.PDiag(diag::warn_unlock_kind_mismatch)
                       << Kind << LockName << Received << Expected);
    Warnings.emplace_back(std::move(Warning),
                          makeLockedHereNote(LocLocked, Kind));
  }

  void handleDoubleLock(StringRef Kind, Name LockName, SourceLocation LocLocked,
                        SourceLocation LocDoubleLock) override {
    if (LocDoubleLock.isInvalid())
      LocDoubleLock = FunLocation;
    PartialDiagnosticAt Warning(LocDoubleLock, S.PDiag(diag::warn_double_lock)
                                                   << Kind << LockName);
    Warnings.emplace_back(std::move(Warning),
                          makeLockedHereNote(LocLocked, Kind));
  }

  void handleMutexHeldEndOfScope(StringRef Kind, Name LockName,
                                 SourceLocation LocLocked,
                                 SourceLocation LocEndOfScope,
                                 LockErrorKind LEK) override {
    unsigned DiagID = 0;
    switch (LEK) {
      case LEK_LockedSomePredecessors:
        DiagID = diag::warn_lock_some_predecessors;
        break;
      case LEK_LockedSomeLoopIterations:
        DiagID = diag::warn_expecting_lock_held_on_loop;
        break;
      case LEK_LockedAtEndOfFunction:
        DiagID = diag::warn_no_unlock;
        break;
      case LEK_NotLockedAtEndOfFunction:
        DiagID = diag::warn_expecting_locked;
        break;
    }
    if (LocEndOfScope.isInvalid())
      LocEndOfScope = FunEndLocation;

    PartialDiagnosticAt Warning(LocEndOfScope, S.PDiag(DiagID) << Kind
                                                               << LockName);
    Warnings.emplace_back(std::move(Warning),
                          makeLockedHereNote(LocLocked, Kind));
  }

  void handleExclusiveAndShared(StringRef Kind, Name LockName,
                                SourceLocation Loc1,
                                SourceLocation Loc2) override {
    PartialDiagnosticAt Warning(Loc1,
                                S.PDiag(diag::warn_lock_exclusive_and_shared)
                                    << Kind << LockName);
    PartialDiagnosticAt Note(Loc2, S.PDiag(diag::note_lock_exclusive_and_shared)
                                       << Kind << LockName);
    Warnings.emplace_back(std::move(Warning), getNotes(Note));
  }

  void handleNoMutexHeld(const NamedDecl *D, ProtectedOperationKind POK,
                         AccessKind AK, SourceLocation Loc) override {
    assert((POK == POK_VarAccess || POK == POK_VarDereference) &&
           "Only works for variables");
    unsigned DiagID = POK == POK_VarAccess ?
                        diag::warn_variable_requires_any_lock :
                        diag::warn_var_deref_requires_any_lock;
    PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
        << D << getLockKindFromAccessKind(AK));
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void handleMutexNotHeld(StringRef Kind, const NamedDecl *D,
                          ProtectedOperationKind POK, Name LockName,
                          LockKind LK, SourceLocation Loc,
                          Name *PossibleMatch) override {
    unsigned DiagID = 0;
    if (PossibleMatch) {
      // A capability with a similar name is held; use the "precise"
      // diagnostics and point at the near match.
      switch (POK) {
        case POK_VarAccess:
          DiagID = diag::warn_variable_requires_lock_precise;
          break;
        case POK_VarDereference:
          DiagID = diag::warn_var_deref_requires_lock_precise;
          break;
        case POK_FunctionCall:
          DiagID = diag::warn_fun_requires_lock_precise;
          break;
        case POK_PassByRef:
          DiagID = diag::warn_guarded_pass_by_reference;
          break;
        case POK_PtPassByRef:
          DiagID = diag::warn_pt_guarded_pass_by_reference;
          break;
      }
      PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind
                                                       << D
                                                       << LockName << LK);
      PartialDiagnosticAt Note(Loc, S.PDiag(diag::note_found_mutex_near_match)
                                        << *PossibleMatch);
      if (Verbose && POK == POK_VarAccess) {
        PartialDiagnosticAt VNote(D->getLocation(),
                                  S.PDiag(diag::note_guarded_by_declared_here)
                                      << D->getDeclName());
        Warnings.emplace_back(std::move(Warning), getNotes(Note, VNote));
      } else
        Warnings.emplace_back(std::move(Warning), getNotes(Note));
    } else {
      switch (POK) {
        case POK_VarAccess:
          DiagID = diag::warn_variable_requires_lock;
          break;
        case POK_VarDereference:
          DiagID = diag::warn_var_deref_requires_lock;
          break;
        case POK_FunctionCall:
          DiagID = diag::warn_fun_requires_lock;
          break;
        case POK_PassByRef:
          DiagID = diag::warn_guarded_pass_by_reference;
          break;
        case POK_PtPassByRef:
          DiagID = diag::warn_pt_guarded_pass_by_reference;
          break;
      }
      PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << Kind
                                                       << D << LockName << LK);
      if (Verbose && POK == POK_VarAccess) {
        PartialDiagnosticAt Note(D->getLocation(),
                                 S.PDiag(diag::note_guarded_by_declared_here));
        Warnings.emplace_back(std::move(Warning), getNotes(Note));
      } else
        Warnings.emplace_back(std::move(Warning), getNotes());
    }
  }

  void handleNegativeNotHeld(StringRef Kind, Name LockName, Name Neg,
                             SourceLocation Loc) override {
    PartialDiagnosticAt Warning(Loc,
        S.PDiag(diag::warn_acquire_requires_negative_cap)
        << Kind << LockName << Neg);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void handleNegativeNotHeld(const NamedDecl *D, Name LockName,
                             SourceLocation Loc) override {
    PartialDiagnosticAt Warning(
        Loc, S.PDiag(diag::warn_fun_requires_negative_cap) << D << LockName);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void handleFunExcludesLock(StringRef Kind, Name FunName, Name LockName,
                             SourceLocation Loc) override {
    PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_fun_excludes_mutex)
                                         << Kind << FunName << LockName);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void handleLockAcquiredBefore(StringRef Kind, Name L1Name, Name L2Name,
                                SourceLocation Loc) override {
    PartialDiagnosticAt Warning(Loc,
        S.PDiag(diag::warn_acquired_before) << Kind << L1Name << L2Name);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void handleBeforeAfterCycle(Name L1Name, SourceLocation Loc) override {
    PartialDiagnosticAt Warning(Loc,
        S.PDiag(diag::warn_acquired_before_after_cycle) << L1Name);
    Warnings.emplace_back(std::move(Warning), getNotes());
  }

  void enterFunction(const FunctionDecl* FD) override {
    CurrentFunction = FD;
  }

  void leaveFunction(const FunctionDecl* FD) override {
    CurrentFunction = nullptr;
  }
};
2044 } // anonymous namespace
2045 } // namespace threadSafety
2046 } // namespace clang
//===----------------------------------------------------------------------===//
// -Wconsumed
//===----------------------------------------------------------------------===//
2053 namespace consumed
{
2055 class ConsumedWarningsHandler
: public ConsumedWarningsHandlerBase
{
// Collects consumed-analysis diagnostics; \p S is the Sema instance used to
// emit them later in emitDiagnostics().
ConsumedWarningsHandler(Sema &S) : S(S) {}
2064 void emitDiagnostics() override
{
2065 Warnings
.sort(SortDiagBySourceLocation(S
.getSourceManager()));
2066 for (const auto &Diag
: Warnings
) {
2067 S
.Diag(Diag
.first
.first
, Diag
.first
.second
);
2068 for (const auto &Note
: Diag
.second
)
2069 S
.Diag(Note
.first
, Note
.second
);
2073 void warnLoopStateMismatch(SourceLocation Loc
,
2074 StringRef VariableName
) override
{
2075 PartialDiagnosticAt
Warning(Loc
, S
.PDiag(diag::warn_loop_state_mismatch
) <<
2078 Warnings
.emplace_back(std::move(Warning
), OptionalNotes());
2081 void warnParamReturnTypestateMismatch(SourceLocation Loc
,
2082 StringRef VariableName
,
2083 StringRef ExpectedState
,
2084 StringRef ObservedState
) override
{
2086 PartialDiagnosticAt
Warning(Loc
, S
.PDiag(
2087 diag::warn_param_return_typestate_mismatch
) << VariableName
<<
2088 ExpectedState
<< ObservedState
);
2090 Warnings
.emplace_back(std::move(Warning
), OptionalNotes());
2093 void warnParamTypestateMismatch(SourceLocation Loc
, StringRef ExpectedState
,
2094 StringRef ObservedState
) override
{
2096 PartialDiagnosticAt
Warning(Loc
, S
.PDiag(
2097 diag::warn_param_typestate_mismatch
) << ExpectedState
<< ObservedState
);
2099 Warnings
.emplace_back(std::move(Warning
), OptionalNotes());
2102 void warnReturnTypestateForUnconsumableType(SourceLocation Loc
,
2103 StringRef TypeName
) override
{
2104 PartialDiagnosticAt
Warning(Loc
, S
.PDiag(
2105 diag::warn_return_typestate_for_unconsumable_type
) << TypeName
);
2107 Warnings
.emplace_back(std::move(Warning
), OptionalNotes());
2110 void warnReturnTypestateMismatch(SourceLocation Loc
, StringRef ExpectedState
,
2111 StringRef ObservedState
) override
{
2113 PartialDiagnosticAt
Warning(Loc
, S
.PDiag(
2114 diag::warn_return_typestate_mismatch
) << ExpectedState
<< ObservedState
);
2116 Warnings
.emplace_back(std::move(Warning
), OptionalNotes());
2119 void warnUseOfTempInInvalidState(StringRef MethodName
, StringRef State
,
2120 SourceLocation Loc
) override
{
2122 PartialDiagnosticAt
Warning(Loc
, S
.PDiag(
2123 diag::warn_use_of_temp_in_invalid_state
) << MethodName
<< State
);
2125 Warnings
.emplace_back(std::move(Warning
), OptionalNotes());
2128 void warnUseInInvalidState(StringRef MethodName
, StringRef VariableName
,
2129 StringRef State
, SourceLocation Loc
) override
{
2131 PartialDiagnosticAt
Warning(Loc
, S
.PDiag(diag::warn_use_in_invalid_state
) <<
2132 MethodName
<< VariableName
<< State
);
2134 Warnings
.emplace_back(std::move(Warning
), OptionalNotes());
2137 } // anonymous namespace
2138 } // namespace consumed
2139 } // namespace clang
2141 //===----------------------------------------------------------------------===//
2142 // AnalysisBasedWarnings - Worker object used by Sema to execute analysis-based
2143 // warnings on a function, method, or block.
2144 //===----------------------------------------------------------------------===//
2146 sema::AnalysisBasedWarnings::Policy::Policy() {
2147 enableCheckFallThrough
= 1;
2148 enableCheckUnreachable
= 0;
2149 enableThreadSafetyAnalysis
= 0;
2150 enableConsumedAnalysis
= 0;
2153 /// InterProceduralData aims to be a storage of whatever data should be passed
2154 /// between analyses of different functions.
2156 /// At the moment, its primary goal is to make the information gathered during
2157 /// the analysis of the blocks available during the analysis of the enclosing
2158 /// function. This is important due to the fact that blocks are analyzed before
2159 /// the enclosed function is even parsed fully, so it is not viable to access
2160 /// anything in the outer scope while analyzing the block. On the other hand,
2161 /// re-building CFG for blocks and re-analyzing them when we do have all the
/// information (i.e. during the analysis of the enclosing function) seems to be
/// impractical.
2164 class sema::AnalysisBasedWarnings::InterProceduralData
{
2166 // It is important to analyze blocks within functions because it's a very
2167 // common pattern to capture completion handler parameters by blocks.
2168 CalledOnceInterProceduralData CalledOnceData
;
2171 static unsigned isEnabled(DiagnosticsEngine
&D
, unsigned diag
) {
2172 return (unsigned)!D
.isIgnored(diag
, SourceLocation());
2175 sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema
&s
)
2176 : S(s
), IPData(std::make_unique
<InterProceduralData
>()),
2177 NumFunctionsAnalyzed(0), NumFunctionsWithBadCFGs(0), NumCFGBlocks(0),
2178 MaxCFGBlocksPerFunction(0), NumUninitAnalysisFunctions(0),
2179 NumUninitAnalysisVariables(0), MaxUninitAnalysisVariablesPerFunction(0),
2180 NumUninitAnalysisBlockVisits(0),
2181 MaxUninitAnalysisBlockVisitsPerFunction(0) {
2183 using namespace diag
;
2184 DiagnosticsEngine
&D
= S
.getDiagnostics();
2186 DefaultPolicy
.enableCheckUnreachable
=
2187 isEnabled(D
, warn_unreachable
) || isEnabled(D
, warn_unreachable_break
) ||
2188 isEnabled(D
, warn_unreachable_return
) ||
2189 isEnabled(D
, warn_unreachable_loop_increment
);
2191 DefaultPolicy
.enableThreadSafetyAnalysis
= isEnabled(D
, warn_double_lock
);
2193 DefaultPolicy
.enableConsumedAnalysis
=
2194 isEnabled(D
, warn_use_in_invalid_state
);
// Defined out-of-line (rather than implicitly in the header) because the
// std::unique_ptr<InterProceduralData> member needs the complete type at the
// point the destructor is instantiated; the header only forward-declares it.
sema::AnalysisBasedWarnings::~AnalysisBasedWarnings() = default;
2200 static void flushDiagnostics(Sema
&S
, const sema::FunctionScopeInfo
*fscope
) {
2201 for (const auto &D
: fscope
->PossiblyUnreachableDiags
)
2202 S
.Diag(D
.Loc
, D
.PD
);
// Run all enabled CFG-based analyses (fall-through, unreachable code, thread
// safety, consumed, uninitialized variables, called-once, fallthrough
// annotations, weak-use, recursion, throw-in-noexcept) over one function,
// method, or block body and emit the resulting diagnostics.
//
// NOTE(review): this routine appears to have lost several statements in
// extraction (early `return;`s, closing braces, and some call arguments are
// missing below — each spot is flagged inline). Reconcile with the upstream
// file before building; code tokens are intentionally left untouched here.
void clang::sema::AnalysisBasedWarnings::IssueWarnings(
    sema::AnalysisBasedWarnings::Policy P, sema::FunctionScopeInfo *fscope,
    const Decl *D, QualType BlockType) {
  // We avoid doing analysis-based warnings when there are errors for two
  // reasons:
  // (1) The CFGs often can't be constructed (if the body is invalid), so
  //     don't bother trying.
  // (2) The code already has problems; running the analysis just takes more
  //     time.
  DiagnosticsEngine &Diags = S.getDiagnostics();

  // Do not do any analysis if we are going to just ignore them.
  if (Diags.getIgnoreAllWarnings() ||
      (Diags.getSuppressSystemWarnings() &&
       S.SourceMgr.isInSystemHeader(D->getLocation())))
    // NOTE(review): an early `return;` is presumably missing here — confirm.

  // For code in dependent contexts, we'll do this at instantiation time.
  if (cast<DeclContext>(D)->isDependentContext())
    // NOTE(review): an early `return;` is presumably missing here — confirm.

  if (S.hasUncompilableErrorOccurred()) {
    // Flush out any possibly unreachable diagnostics.
    flushDiagnostics(S, fscope);
    // NOTE(review): a `return;` and closing `}` are presumably missing here.

  const Stmt *Body = D->getBody();

  // Construct the analysis context with the specified CFG build options.
  AnalysisDeclContext AC(/* AnalysisDeclContextManager */ nullptr, D);

  // Don't generate EH edges for CallExprs as we'd like to avoid the n^2
  // explosion for destructors that can result and the compile time hit.
  AC.getCFGBuildOptions().PruneTriviallyFalseEdges = true;
  AC.getCFGBuildOptions().AddEHEdges = false;
  AC.getCFGBuildOptions().AddInitializers = true;
  AC.getCFGBuildOptions().AddImplicitDtors = true;
  AC.getCFGBuildOptions().AddTemporaryDtors = true;
  AC.getCFGBuildOptions().AddCXXNewAllocator = false;
  AC.getCFGBuildOptions().AddCXXDefaultInitExprInCtors = true;

  // Force that certain expressions appear as CFGElements in the CFG. This
  // is used to speed up various analyses.
  // FIXME: This isn't the right factoring. This is here for initial
  // prototyping, but we need a way for analyses to say what expressions they
  // expect to always be CFGElements and then fill in the BuildOptions
  // appropriately. This is essentially a layering violation.
  if (P.enableCheckUnreachable || P.enableThreadSafetyAnalysis ||
      P.enableConsumedAnalysis) {
    // Unreachable code analysis and thread safety require a linearized CFG.
    AC.getCFGBuildOptions().setAllAlwaysAdd();
  // NOTE(review): a closing `}` (and likely an `else {`) is presumably
  // missing before the chained setAlwaysAdd() calls below — confirm.
  AC.getCFGBuildOptions()
      .setAlwaysAdd(Stmt::BinaryOperatorClass)
      .setAlwaysAdd(Stmt::CompoundAssignOperatorClass)
      .setAlwaysAdd(Stmt::BlockExprClass)
      .setAlwaysAdd(Stmt::CStyleCastExprClass)
      .setAlwaysAdd(Stmt::DeclRefExprClass)
      .setAlwaysAdd(Stmt::ImplicitCastExprClass)
      .setAlwaysAdd(Stmt::UnaryOperatorClass);

  // Install the logical handler.
  llvm::Optional<LogicalErrorHandler> LEH;
  if (LogicalErrorHandler::hasActiveDiagnostics(Diags, D->getBeginLoc())) {
    // NOTE(review): an `LEH.emplace(S);` (or equivalent) that initializes the
    // Optional before it is dereferenced is presumably missing — confirm.
    AC.getCFGBuildOptions().Observer = &*LEH;
  // NOTE(review): closing `}` presumably missing here.

  // Emit delayed diagnostics.
  if (!fscope->PossiblyUnreachableDiags.empty()) {
    bool analyzed = false;

    // Register the expressions with the CFGBuilder.
    for (const auto &D : fscope->PossiblyUnreachableDiags) {
      for (const Stmt *S : D.Stmts)
        AC.registerForcedBlockExpression(S);
    // NOTE(review): a closing `}` and the `if (AC.getCFG()) { analyzed =
    // true;` guard are presumably missing here — confirm.

    for (const auto &D : fscope->PossiblyUnreachableDiags) {
      bool AllReachable = true;
      for (const Stmt *S : D.Stmts) {
        const CFGBlock *block = AC.getBlockForRegisteredExpression(S);
        CFGReverseBlockReachabilityAnalysis *cra =
            AC.getCFGReachablityAnalysis();
        // FIXME: We should be able to assert that block is non-null, but
        // the CFG analysis can skip potentially-evaluated expressions in
        // edge cases; see test/Sema/vla-2.c.
        // NOTE(review): a null-check such as `if (block && cra) {` is
        // presumably missing here — confirm.
        // Can this block be reached from the entrance?
        if (!cra->isReachable(&AC.getCFG()->getEntry(), block)) {
          AllReachable = false;
        // NOTE(review): a `break;` and several closing braces are presumably
        // missing here — confirm.
        // If we cannot map to a basic block, assume the statement is
        // reachable; emit the delayed diagnostic only when every statement
        // was reachable (`if (AllReachable)` guard presumably lost).
        S.Diag(D.Loc, D.PD);
    // NOTE(review): closing braces and an `if (!analyzed)` guard are
    // presumably missing before the fallback flush below — confirm.
    flushDiagnostics(S, fscope);

  // Warning: check missing 'return'
  if (P.enableCheckFallThrough) {
    // NOTE(review): the first condition of this conditional chain (likely
    // `(fscope->isBlock()`) is missing before the first `?` — confirm.
    const CheckFallThroughDiagnostics &CD =
        ? CheckFallThroughDiagnostics::MakeForBlock()
        : (isa<CXXMethodDecl>(D) &&
           cast<CXXMethodDecl>(D)->getOverloadedOperator() == OO_Call &&
           cast<CXXMethodDecl>(D)->getParent()->isLambda())
              ? CheckFallThroughDiagnostics::MakeForLambda()
              : (fscope->isCoroutine()
                     ? CheckFallThroughDiagnostics::MakeForCoroutine(D)
                     : CheckFallThroughDiagnostics::MakeForFunction(D)));
    CheckFallThroughForBody(S, D, Body, BlockType, CD, AC, fscope);

  // Warning: check for unreachable code
  if (P.enableCheckUnreachable) {
    // Only check for unreachable code on non-template instantiations.
    // Different template instantiations can effectively change the control-flow
    // and it is very difficult to prove that a snippet of code in a template
    // is unreachable for all instantiations.
    bool isTemplateInstantiation = false;
    if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D))
      isTemplateInstantiation = Function->isTemplateInstantiation();
    if (!isTemplateInstantiation)
      CheckUnreachable(S, AC);

  // Check for thread safety violations
  if (P.enableThreadSafetyAnalysis) {
    SourceLocation FL = AC.getDecl()->getLocation();
    SourceLocation FEL = AC.getDecl()->getEndLoc();
    threadSafety::ThreadSafetyReporter Reporter(S, FL, FEL);
    // Beta and verbose modes piggyback on their own diagnostic groups.
    if (!Diags.isIgnored(diag::warn_thread_safety_beta, D->getBeginLoc()))
      Reporter.setIssueBetaWarnings(true);
    if (!Diags.isIgnored(diag::warn_thread_safety_verbose, D->getBeginLoc()))
      Reporter.setVerbose(true);

    threadSafety::runThreadSafetyAnalysis(AC, Reporter,
                                          &S.ThreadSafetyDeclCache);
    Reporter.emitDiagnostics();

  // Check for violations of consumed properties.
  if (P.enableConsumedAnalysis) {
    consumed::ConsumedWarningsHandler WarningHandler(S);
    consumed::ConsumedAnalyzer Analyzer(WarningHandler);
    // NOTE(review): the `Analyzer.run(AC);` call and closing `}` are
    // presumably missing here — confirm.

  // Check for uninitialized-variable uses if any flavor of the warning is on.
  if (!Diags.isIgnored(diag::warn_uninit_var, D->getBeginLoc()) ||
      !Diags.isIgnored(diag::warn_sometimes_uninit_var, D->getBeginLoc()) ||
      !Diags.isIgnored(diag::warn_maybe_uninit_var, D->getBeginLoc()) ||
      !Diags.isIgnored(diag::warn_uninit_const_reference, D->getBeginLoc())) {
    if (CFG *cfg = AC.getCFG()) {
      UninitValsDiagReporter reporter(S);
      UninitVariablesAnalysisStats stats;
      std::memset(&stats, 0, sizeof(UninitVariablesAnalysisStats));
      // NOTE(review): the trailing arguments of this call (likely
      // `reporter, stats);`) are missing — confirm.
      runUninitializedVariablesAnalysis(*cast<DeclContext>(D), *cfg, AC,
      if (S.CollectStats && stats.NumVariablesAnalyzed > 0) {
        ++NumUninitAnalysisFunctions;
        NumUninitAnalysisVariables += stats.NumVariablesAnalyzed;
        NumUninitAnalysisBlockVisits += stats.NumBlockVisits;
        MaxUninitAnalysisVariablesPerFunction =
            std::max(MaxUninitAnalysisVariablesPerFunction,
                     stats.NumVariablesAnalyzed);
        MaxUninitAnalysisBlockVisitsPerFunction =
            std::max(MaxUninitAnalysisBlockVisitsPerFunction,
                     stats.NumBlockVisits);

  // Check for violations of "called once" parameter properties.
  if (S.getLangOpts().ObjC && !S.getLangOpts().CPlusPlus &&
      shouldAnalyzeCalledOnceParameters(Diags, D->getBeginLoc())) {
    CalledOnceCheckReporter Reporter(S, IPData->CalledOnceData);
    // NOTE(review): the leading arguments of this call (likely `AC,
    // Reporter,`) are missing — confirm.
    checkCalledOnceParameters(
        shouldAnalyzeCalledOnceConventions(Diags, D->getBeginLoc()));

  bool FallThroughDiagFull =
      !Diags.isIgnored(diag::warn_unannotated_fallthrough, D->getBeginLoc());
  bool FallThroughDiagPerFunction = !Diags.isIgnored(
      diag::warn_unannotated_fallthrough_per_function, D->getBeginLoc());
  if (FallThroughDiagFull || FallThroughDiagPerFunction ||
      fscope->HasFallthroughStmt) {
    DiagnoseSwitchLabelsFallthrough(S, AC, !FallThroughDiagFull);

  if (S.getLangOpts().ObjCWeak &&
      !Diags.isIgnored(diag::warn_arc_repeated_use_of_weak, D->getBeginLoc()))
    diagnoseRepeatedUseOfWeak(S, fscope, D, AC.getParentMap());

  // Check for infinite self-recursion in functions
  if (!Diags.isIgnored(diag::warn_infinite_recursive_function,
                       D->getBeginLoc())) {
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
      checkRecursiveFunction(S, FD, Body, AC);

  // Check for throw out of non-throwing function.
  if (!Diags.isIgnored(diag::warn_throw_in_noexcept_func, D->getBeginLoc()))
    if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
      if (S.getLangOpts().CPlusPlus && isNoexcept(FD))
        checkThrowInNonThrowingFunc(S, FD, AC);

  // If none of the previous checks caused a CFG build, trigger one here
  // for the logical error handler.
  if (LogicalErrorHandler::hasActiveDiagnostics(Diags, D->getBeginLoc())) {
    // NOTE(review): the body (likely `AC.getCFG();`) and closing `}` are
    // presumably missing here — confirm.

  // Collect statistics about the CFG if it was built.
  if (S.CollectStats && AC.isCFGBuilt()) {
    ++NumFunctionsAnalyzed;
    if (CFG *cfg = AC.getCFG()) {
      // If we successfully built a CFG for this context, record some more
      // detail information about it.
      NumCFGBlocks += cfg->getNumBlockIDs();
      MaxCFGBlocksPerFunction = std::max(MaxCFGBlocksPerFunction,
                                         cfg->getNumBlockIDs());
      // NOTE(review): an `} else` separating success from the bad-CFG
      // counter below is presumably missing — confirm.
      ++NumFunctionsWithBadCFGs;
2454 void clang::sema::AnalysisBasedWarnings::PrintStats() const {
2455 llvm::errs() << "\n*** Analysis Based Warnings Stats:\n";
2457 unsigned NumCFGsBuilt
= NumFunctionsAnalyzed
- NumFunctionsWithBadCFGs
;
2458 unsigned AvgCFGBlocksPerFunction
=
2459 !NumCFGsBuilt
? 0 : NumCFGBlocks
/NumCFGsBuilt
;
2460 llvm::errs() << NumFunctionsAnalyzed
<< " functions analyzed ("
2461 << NumFunctionsWithBadCFGs
<< " w/o CFGs).\n"
2462 << " " << NumCFGBlocks
<< " CFG blocks built.\n"
2463 << " " << AvgCFGBlocksPerFunction
2464 << " average CFG blocks per function.\n"
2465 << " " << MaxCFGBlocksPerFunction
2466 << " max CFG blocks per function.\n";
2468 unsigned AvgUninitVariablesPerFunction
= !NumUninitAnalysisFunctions
? 0
2469 : NumUninitAnalysisVariables
/NumUninitAnalysisFunctions
;
2470 unsigned AvgUninitBlockVisitsPerFunction
= !NumUninitAnalysisFunctions
? 0
2471 : NumUninitAnalysisBlockVisits
/NumUninitAnalysisFunctions
;
2472 llvm::errs() << NumUninitAnalysisFunctions
2473 << " functions analyzed for uninitialiazed variables\n"
2474 << " " << NumUninitAnalysisVariables
<< " variables analyzed.\n"
2475 << " " << AvgUninitVariablesPerFunction
2476 << " average variables per function.\n"
2477 << " " << MaxUninitAnalysisVariablesPerFunction
2478 << " max variables per function.\n"
2479 << " " << NumUninitAnalysisBlockVisits
<< " block visits.\n"
2480 << " " << AvgUninitBlockVisitsPerFunction
2481 << " average block visits per function.\n"
2482 << " " << MaxUninitAnalysisBlockVisitsPerFunction
2483 << " max block visits per function.\n";