1 //=- AnalysisBasedWarnings.cpp - Sema warnings based on libAnalysis -*- C++ -*-=//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file defines analysis_warnings::[Policy,Executor].
10 // Together they are used by Sema to issue warnings based on inexpensive
11 // static analysis algorithms in libAnalysis.
13 //===----------------------------------------------------------------------===//
15 #include "clang/Sema/AnalysisBasedWarnings.h"
16 #include "clang/AST/Decl.h"
17 #include "clang/AST/DeclCXX.h"
18 #include "clang/AST/DeclObjC.h"
19 #include "clang/AST/EvaluatedExprVisitor.h"
20 #include "clang/AST/Expr.h"
21 #include "clang/AST/ExprCXX.h"
22 #include "clang/AST/ExprObjC.h"
23 #include "clang/AST/OperationKinds.h"
24 #include "clang/AST/ParentMap.h"
25 #include "clang/AST/RecursiveASTVisitor.h"
26 #include "clang/AST/StmtCXX.h"
27 #include "clang/AST/StmtObjC.h"
28 #include "clang/AST/StmtVisitor.h"
29 #include "clang/AST/RecursiveASTVisitor.h"
30 #include "clang/AST/Type.h"
31 #include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
32 #include "clang/Analysis/Analyses/CalledOnceCheck.h"
33 #include "clang/Analysis/Analyses/Consumed.h"
34 #include "clang/Analysis/Analyses/ReachableCode.h"
35 #include "clang/Analysis/Analyses/ThreadSafety.h"
36 #include "clang/Analysis/Analyses/UninitializedValues.h"
37 #include "clang/Analysis/Analyses/UnsafeBufferUsage.h"
38 #include "clang/Analysis/AnalysisDeclContext.h"
39 #include "clang/Analysis/CFG.h"
40 #include "clang/Analysis/CFGStmtMap.h"
41 #include "clang/Basic/Diagnostic.h"
42 #include "clang/Basic/SourceLocation.h"
43 #include "clang/Basic/SourceManager.h"
44 #include "clang/Lex/Preprocessor.h"
45 #include "clang/Sema/ScopeInfo.h"
46 #include "clang/Sema/SemaInternal.h"
47 #include "llvm/ADT/ArrayRef.h"
48 #include "llvm/ADT/BitVector.h"
49 #include "llvm/ADT/MapVector.h"
50 #include "llvm/ADT/STLFunctionalExtras.h"
51 #include "llvm/ADT/SmallString.h"
52 #include "llvm/ADT/SmallVector.h"
53 #include "llvm/ADT/StringRef.h"
54 #include "llvm/Support/Casting.h"
60 using namespace clang
;
62 //===----------------------------------------------------------------------===//
63 // Unreachable code analysis.
64 //===----------------------------------------------------------------------===//
67 class UnreachableCodeHandler
: public reachable_code::Callback
{
69 SourceRange PreviousSilenceableCondVal
;
72 UnreachableCodeHandler(Sema
&s
) : S(s
) {}
74 void HandleUnreachable(reachable_code::UnreachableKind UK
, SourceLocation L
,
75 SourceRange SilenceableCondVal
, SourceRange R1
,
76 SourceRange R2
, bool HasFallThroughAttr
) override
{
77 // If the diagnosed code is `[[fallthrough]];` and
78 // `-Wunreachable-code-fallthrough` is enabled, suppress `code will never
79 // be executed` warning to avoid generating diagnostic twice
80 if (HasFallThroughAttr
&&
81 !S
.getDiagnostics().isIgnored(diag::warn_unreachable_fallthrough_attr
,
85 // Avoid reporting multiple unreachable code diagnostics that are
86 // triggered by the same conditional value.
87 if (PreviousSilenceableCondVal
.isValid() &&
88 SilenceableCondVal
.isValid() &&
89 PreviousSilenceableCondVal
== SilenceableCondVal
)
91 PreviousSilenceableCondVal
= SilenceableCondVal
;
93 unsigned diag
= diag::warn_unreachable
;
95 case reachable_code::UK_Break
:
96 diag
= diag::warn_unreachable_break
;
98 case reachable_code::UK_Return
:
99 diag
= diag::warn_unreachable_return
;
101 case reachable_code::UK_Loop_Increment
:
102 diag
= diag::warn_unreachable_loop_increment
;
104 case reachable_code::UK_Other
:
108 S
.Diag(L
, diag
) << R1
<< R2
;
110 SourceLocation Open
= SilenceableCondVal
.getBegin();
111 if (Open
.isValid()) {
112 SourceLocation Close
= SilenceableCondVal
.getEnd();
113 Close
= S
.getLocForEndOfToken(Close
);
114 if (Close
.isValid()) {
115 S
.Diag(Open
, diag::note_unreachable_silence
)
116 << FixItHint::CreateInsertion(Open
, "/* DISABLES CODE */ (")
117 << FixItHint::CreateInsertion(Close
, ")");
122 } // anonymous namespace
124 /// CheckUnreachable - Check for unreachable code.
125 static void CheckUnreachable(Sema
&S
, AnalysisDeclContext
&AC
) {
126 // As a heuristic prune all diagnostics not in the main file. Currently
127 // the majority of warnings in headers are false positives. These
128 // are largely caused by configuration state, e.g. preprocessor
129 // defined code, etc.
131 // Note that this is also a performance optimization. Analyzing
132 // headers many times can be expensive.
133 if (!S
.getSourceManager().isInMainFile(AC
.getDecl()->getBeginLoc()))
136 UnreachableCodeHandler
UC(S
);
137 reachable_code::FindUnreachableCode(AC
, S
.getPreprocessor(), UC
);
141 /// Warn on logical operator errors in CFGBuilder
142 class LogicalErrorHandler
: public CFGCallback
{
146 LogicalErrorHandler(Sema
&S
) : S(S
) {}
148 static bool HasMacroID(const Expr
*E
) {
149 if (E
->getExprLoc().isMacroID())
152 // Recurse to children.
153 for (const Stmt
*SubStmt
: E
->children())
154 if (const Expr
*SubExpr
= dyn_cast_or_null
<Expr
>(SubStmt
))
155 if (HasMacroID(SubExpr
))
161 void logicAlwaysTrue(const BinaryOperator
*B
, bool isAlwaysTrue
) override
{
165 unsigned DiagID
= isAlwaysTrue
166 ? diag::warn_tautological_negation_or_compare
167 : diag::warn_tautological_negation_and_compare
;
168 SourceRange DiagRange
= B
->getSourceRange();
169 S
.Diag(B
->getExprLoc(), DiagID
) << DiagRange
;
172 void compareAlwaysTrue(const BinaryOperator
*B
, bool isAlwaysTrue
) override
{
176 SourceRange DiagRange
= B
->getSourceRange();
177 S
.Diag(B
->getExprLoc(), diag::warn_tautological_overlap_comparison
)
178 << DiagRange
<< isAlwaysTrue
;
181 void compareBitwiseEquality(const BinaryOperator
*B
,
182 bool isAlwaysTrue
) override
{
186 SourceRange DiagRange
= B
->getSourceRange();
187 S
.Diag(B
->getExprLoc(), diag::warn_comparison_bitwise_always
)
188 << DiagRange
<< isAlwaysTrue
;
191 void compareBitwiseOr(const BinaryOperator
*B
) override
{
195 SourceRange DiagRange
= B
->getSourceRange();
196 S
.Diag(B
->getExprLoc(), diag::warn_comparison_bitwise_or
) << DiagRange
;
199 static bool hasActiveDiagnostics(DiagnosticsEngine
&Diags
,
200 SourceLocation Loc
) {
201 return !Diags
.isIgnored(diag::warn_tautological_overlap_comparison
, Loc
) ||
202 !Diags
.isIgnored(diag::warn_comparison_bitwise_or
, Loc
) ||
203 !Diags
.isIgnored(diag::warn_tautological_negation_and_compare
, Loc
);
206 } // anonymous namespace
208 //===----------------------------------------------------------------------===//
209 // Check for infinite self-recursion in functions
210 //===----------------------------------------------------------------------===//
212 // Returns true if the function is called anywhere within the CFGBlock.
213 // For member functions, the additional condition of being call from the
214 // this pointer is required.
215 static bool hasRecursiveCallInPath(const FunctionDecl
*FD
, CFGBlock
&Block
) {
216 // Process all the Stmt's in this block to find any calls to FD.
217 for (const auto &B
: Block
) {
218 if (B
.getKind() != CFGElement::Statement
)
221 const CallExpr
*CE
= dyn_cast
<CallExpr
>(B
.getAs
<CFGStmt
>()->getStmt());
222 if (!CE
|| !CE
->getCalleeDecl() ||
223 CE
->getCalleeDecl()->getCanonicalDecl() != FD
)
226 // Skip function calls which are qualified with a templated class.
227 if (const DeclRefExpr
*DRE
=
228 dyn_cast
<DeclRefExpr
>(CE
->getCallee()->IgnoreParenImpCasts())) {
229 if (NestedNameSpecifier
*NNS
= DRE
->getQualifier()) {
230 if (NNS
->getKind() == NestedNameSpecifier::TypeSpec
&&
231 isa
<TemplateSpecializationType
>(NNS
->getAsType())) {
237 const CXXMemberCallExpr
*MCE
= dyn_cast
<CXXMemberCallExpr
>(CE
);
238 if (!MCE
|| isa
<CXXThisExpr
>(MCE
->getImplicitObjectArgument()) ||
239 !MCE
->getMethodDecl()->isVirtual())
245 // Returns true if every path from the entry block passes through a call to FD.
246 static bool checkForRecursiveFunctionCall(const FunctionDecl
*FD
, CFG
*cfg
) {
247 llvm::SmallPtrSet
<CFGBlock
*, 16> Visited
;
248 llvm::SmallVector
<CFGBlock
*, 16> WorkList
;
249 // Keep track of whether we found at least one recursive path.
250 bool foundRecursion
= false;
252 const unsigned ExitID
= cfg
->getExit().getBlockID();
254 // Seed the work list with the entry block.
255 WorkList
.push_back(&cfg
->getEntry());
257 while (!WorkList
.empty()) {
258 CFGBlock
*Block
= WorkList
.pop_back_val();
260 for (auto I
= Block
->succ_begin(), E
= Block
->succ_end(); I
!= E
; ++I
) {
261 if (CFGBlock
*SuccBlock
= *I
) {
262 if (!Visited
.insert(SuccBlock
).second
)
265 // Found a path to the exit node without a recursive call.
266 if (ExitID
== SuccBlock
->getBlockID())
269 // If the successor block contains a recursive call, end analysis there.
270 if (hasRecursiveCallInPath(FD
, *SuccBlock
)) {
271 foundRecursion
= true;
275 WorkList
.push_back(SuccBlock
);
279 return foundRecursion
;
282 static void checkRecursiveFunction(Sema
&S
, const FunctionDecl
*FD
,
283 const Stmt
*Body
, AnalysisDeclContext
&AC
) {
284 FD
= FD
->getCanonicalDecl();
286 // Only run on non-templated functions and non-templated members of
287 // templated classes.
288 if (FD
->getTemplatedKind() != FunctionDecl::TK_NonTemplate
&&
289 FD
->getTemplatedKind() != FunctionDecl::TK_MemberSpecialization
)
292 CFG
*cfg
= AC
.getCFG();
295 // If the exit block is unreachable, skip processing the function.
296 if (cfg
->getExit().pred_empty())
299 // Emit diagnostic if a recursive function call is detected for all paths.
300 if (checkForRecursiveFunctionCall(FD
, cfg
))
301 S
.Diag(Body
->getBeginLoc(), diag::warn_infinite_recursive_function
);
304 //===----------------------------------------------------------------------===//
305 // Check for throw in a non-throwing function.
306 //===----------------------------------------------------------------------===//
308 /// Determine whether an exception thrown by E, unwinding from ThrowBlock,
309 /// can reach ExitBlock.
310 static bool throwEscapes(Sema
&S
, const CXXThrowExpr
*E
, CFGBlock
&ThrowBlock
,
312 SmallVector
<CFGBlock
*, 16> Stack
;
313 llvm::BitVector
Queued(Body
->getNumBlockIDs());
315 Stack
.push_back(&ThrowBlock
);
316 Queued
[ThrowBlock
.getBlockID()] = true;
318 while (!Stack
.empty()) {
319 CFGBlock
&UnwindBlock
= *Stack
.back();
322 for (auto &Succ
: UnwindBlock
.succs()) {
323 if (!Succ
.isReachable() || Queued
[Succ
->getBlockID()])
326 if (Succ
->getBlockID() == Body
->getExit().getBlockID())
330 dyn_cast_or_null
<CXXCatchStmt
>(Succ
->getLabel())) {
331 QualType Caught
= Catch
->getCaughtType();
332 if (Caught
.isNull() || // catch (...) catches everything
333 !E
->getSubExpr() || // throw; is considered cuaght by any handler
334 S
.handlerCanCatch(Caught
, E
->getSubExpr()->getType()))
335 // Exception doesn't escape via this path.
338 Stack
.push_back(Succ
);
339 Queued
[Succ
->getBlockID()] = true;
347 static void visitReachableThrows(
349 llvm::function_ref
<void(const CXXThrowExpr
*, CFGBlock
&)> Visit
) {
350 llvm::BitVector
Reachable(BodyCFG
->getNumBlockIDs());
351 clang::reachable_code::ScanReachableFromBlock(&BodyCFG
->getEntry(), Reachable
);
352 for (CFGBlock
*B
: *BodyCFG
) {
353 if (!Reachable
[B
->getBlockID()])
355 for (CFGElement
&E
: *B
) {
356 std::optional
<CFGStmt
> S
= E
.getAs
<CFGStmt
>();
359 if (auto *Throw
= dyn_cast
<CXXThrowExpr
>(S
->getStmt()))
365 static void EmitDiagForCXXThrowInNonThrowingFunc(Sema
&S
, SourceLocation OpLoc
,
366 const FunctionDecl
*FD
) {
367 if (!S
.getSourceManager().isInSystemHeader(OpLoc
) &&
368 FD
->getTypeSourceInfo()) {
369 S
.Diag(OpLoc
, diag::warn_throw_in_noexcept_func
) << FD
;
370 if (S
.getLangOpts().CPlusPlus11
&&
371 (isa
<CXXDestructorDecl
>(FD
) ||
372 FD
->getDeclName().getCXXOverloadedOperator() == OO_Delete
||
373 FD
->getDeclName().getCXXOverloadedOperator() == OO_Array_Delete
)) {
374 if (const auto *Ty
= FD
->getTypeSourceInfo()->getType()->
375 getAs
<FunctionProtoType
>())
376 S
.Diag(FD
->getLocation(), diag::note_throw_in_dtor
)
377 << !isa
<CXXDestructorDecl
>(FD
) << !Ty
->hasExceptionSpec()
378 << FD
->getExceptionSpecSourceRange();
380 S
.Diag(FD
->getLocation(), diag::note_throw_in_function
)
381 << FD
->getExceptionSpecSourceRange();
385 static void checkThrowInNonThrowingFunc(Sema
&S
, const FunctionDecl
*FD
,
386 AnalysisDeclContext
&AC
) {
387 CFG
*BodyCFG
= AC
.getCFG();
390 if (BodyCFG
->getExit().pred_empty())
392 visitReachableThrows(BodyCFG
, [&](const CXXThrowExpr
*Throw
, CFGBlock
&Block
) {
393 if (throwEscapes(S
, Throw
, Block
, BodyCFG
))
394 EmitDiagForCXXThrowInNonThrowingFunc(S
, Throw
->getThrowLoc(), FD
);
398 static bool isNoexcept(const FunctionDecl
*FD
) {
399 const auto *FPT
= FD
->getType()->castAs
<FunctionProtoType
>();
400 if (FPT
->isNothrow() || FD
->hasAttr
<NoThrowAttr
>())
405 //===----------------------------------------------------------------------===//
406 // Check for missing return value.
407 //===----------------------------------------------------------------------===//
/// Result of the fall-through analysis in CheckFallThrough.
enum ControlFlowKind {
  UnknownFallThrough,      // No CFG available; nothing can be concluded.
  NeverFallThrough,        // Never falls off the end, but may return.
  MaybeFallThrough,        // Might or might not fall off the end.
  AlwaysFallThrough,       // Always falls off the end of the statement.
  NeverFallThroughOrReturn // Never falls off the end and never returns.
};
417 /// CheckFallThrough - Check that we don't fall off the end of a
418 /// Statement that should return a value.
420 /// \returns AlwaysFallThrough iff we always fall off the end of the statement,
421 /// MaybeFallThrough iff we might or might not fall off the end,
422 /// NeverFallThroughOrReturn iff we never fall off the end of the statement or
423 /// return. We assume NeverFallThrough iff we never fall off the end of the
424 /// statement but we may return. We assume that functions not marked noreturn
426 static ControlFlowKind
CheckFallThrough(AnalysisDeclContext
&AC
) {
427 CFG
*cfg
= AC
.getCFG();
428 if (!cfg
) return UnknownFallThrough
;
430 // The CFG leaves in dead things, and we don't want the dead code paths to
431 // confuse us, so we mark all live things first.
432 llvm::BitVector
live(cfg
->getNumBlockIDs());
433 unsigned count
= reachable_code::ScanReachableFromBlock(&cfg
->getEntry(),
436 bool AddEHEdges
= AC
.getAddEHEdges();
437 if (!AddEHEdges
&& count
!= cfg
->getNumBlockIDs())
438 // When there are things remaining dead, and we didn't add EH edges
439 // from CallExprs to the catch clauses, we have to go back and
440 // mark them as live.
441 for (const auto *B
: *cfg
) {
442 if (!live
[B
->getBlockID()]) {
443 if (B
->pred_begin() == B
->pred_end()) {
444 const Stmt
*Term
= B
->getTerminatorStmt();
445 if (Term
&& isa
<CXXTryStmt
>(Term
))
446 // When not adding EH edges from calls, catch clauses
447 // can otherwise seem dead. Avoid noting them as dead.
448 count
+= reachable_code::ScanReachableFromBlock(B
, live
);
454 // Now we know what is live, we check the live precessors of the exit block
455 // and look for fall through paths, being careful to ignore normal returns,
456 // and exceptional paths.
457 bool HasLiveReturn
= false;
458 bool HasFakeEdge
= false;
459 bool HasPlainEdge
= false;
460 bool HasAbnormalEdge
= false;
462 // Ignore default cases that aren't likely to be reachable because all
463 // enums in a switch(X) have explicit case statements.
464 CFGBlock::FilterOptions FO
;
465 FO
.IgnoreDefaultsWithCoveredEnums
= 1;
467 for (CFGBlock::filtered_pred_iterator I
=
468 cfg
->getExit().filtered_pred_start_end(FO
);
470 const CFGBlock
&B
= **I
;
471 if (!live
[B
.getBlockID()])
474 // Skip blocks which contain an element marked as no-return. They don't
475 // represent actually viable edges into the exit block, so mark them as
477 if (B
.hasNoReturnElement()) {
478 HasAbnormalEdge
= true;
482 // Destructors can appear after the 'return' in the CFG. This is
483 // normal. We need to look pass the destructors for the return
484 // statement (if it exists).
485 CFGBlock::const_reverse_iterator ri
= B
.rbegin(), re
= B
.rend();
487 for ( ; ri
!= re
; ++ri
)
488 if (ri
->getAs
<CFGStmt
>())
491 // No more CFGElements in the block?
493 const Stmt
*Term
= B
.getTerminatorStmt();
494 if (Term
&& (isa
<CXXTryStmt
>(Term
) || isa
<ObjCAtTryStmt
>(Term
))) {
495 HasAbnormalEdge
= true;
498 // A labeled empty statement, or the entry block...
503 CFGStmt CS
= ri
->castAs
<CFGStmt
>();
504 const Stmt
*S
= CS
.getStmt();
505 if (isa
<ReturnStmt
>(S
) || isa
<CoreturnStmt
>(S
)) {
506 HasLiveReturn
= true;
509 if (isa
<ObjCAtThrowStmt
>(S
)) {
513 if (isa
<CXXThrowExpr
>(S
)) {
517 if (isa
<MSAsmStmt
>(S
)) {
518 // TODO: Verify this is correct.
520 HasLiveReturn
= true;
523 if (isa
<CXXTryStmt
>(S
)) {
524 HasAbnormalEdge
= true;
527 if (!llvm::is_contained(B
.succs(), &cfg
->getExit())) {
528 HasAbnormalEdge
= true;
536 return NeverFallThrough
;
537 return NeverFallThroughOrReturn
;
539 if (HasAbnormalEdge
|| HasFakeEdge
|| HasLiveReturn
)
540 return MaybeFallThrough
;
541 // This says AlwaysFallThrough for calls to functions that are not marked
542 // noreturn, that don't return. If people would like this warning to be more
543 // accurate, such functions should be marked as noreturn.
544 return AlwaysFallThrough
;
549 struct CheckFallThroughDiagnostics
{
550 unsigned diag_MaybeFallThrough_HasNoReturn
;
551 unsigned diag_MaybeFallThrough_ReturnsNonVoid
;
552 unsigned diag_AlwaysFallThrough_HasNoReturn
;
553 unsigned diag_AlwaysFallThrough_ReturnsNonVoid
;
554 unsigned diag_NeverFallThroughOrReturn
;
555 enum { Function
, Block
, Lambda
, Coroutine
} funMode
;
556 SourceLocation FuncLoc
;
558 static CheckFallThroughDiagnostics
MakeForFunction(const Decl
*Func
) {
559 CheckFallThroughDiagnostics D
;
560 D
.FuncLoc
= Func
->getLocation();
561 D
.diag_MaybeFallThrough_HasNoReturn
=
562 diag::warn_falloff_noreturn_function
;
563 D
.diag_MaybeFallThrough_ReturnsNonVoid
=
564 diag::warn_maybe_falloff_nonvoid_function
;
565 D
.diag_AlwaysFallThrough_HasNoReturn
=
566 diag::warn_falloff_noreturn_function
;
567 D
.diag_AlwaysFallThrough_ReturnsNonVoid
=
568 diag::warn_falloff_nonvoid_function
;
570 // Don't suggest that virtual functions be marked "noreturn", since they
571 // might be overridden by non-noreturn functions.
572 bool isVirtualMethod
= false;
573 if (const CXXMethodDecl
*Method
= dyn_cast
<CXXMethodDecl
>(Func
))
574 isVirtualMethod
= Method
->isVirtual();
576 // Don't suggest that template instantiations be marked "noreturn"
577 bool isTemplateInstantiation
= false;
578 if (const FunctionDecl
*Function
= dyn_cast
<FunctionDecl
>(Func
))
579 isTemplateInstantiation
= Function
->isTemplateInstantiation();
581 if (!isVirtualMethod
&& !isTemplateInstantiation
)
582 D
.diag_NeverFallThroughOrReturn
=
583 diag::warn_suggest_noreturn_function
;
585 D
.diag_NeverFallThroughOrReturn
= 0;
587 D
.funMode
= Function
;
591 static CheckFallThroughDiagnostics
MakeForCoroutine(const Decl
*Func
) {
592 CheckFallThroughDiagnostics D
;
593 D
.FuncLoc
= Func
->getLocation();
594 D
.diag_MaybeFallThrough_HasNoReturn
= 0;
595 D
.diag_MaybeFallThrough_ReturnsNonVoid
=
596 diag::warn_maybe_falloff_nonvoid_coroutine
;
597 D
.diag_AlwaysFallThrough_HasNoReturn
= 0;
598 D
.diag_AlwaysFallThrough_ReturnsNonVoid
=
599 diag::warn_falloff_nonvoid_coroutine
;
600 D
.diag_NeverFallThroughOrReturn
= 0;
601 D
.funMode
= Coroutine
;
605 static CheckFallThroughDiagnostics
MakeForBlock() {
606 CheckFallThroughDiagnostics D
;
607 D
.diag_MaybeFallThrough_HasNoReturn
=
608 diag::err_noreturn_block_has_return_expr
;
609 D
.diag_MaybeFallThrough_ReturnsNonVoid
=
610 diag::err_maybe_falloff_nonvoid_block
;
611 D
.diag_AlwaysFallThrough_HasNoReturn
=
612 diag::err_noreturn_block_has_return_expr
;
613 D
.diag_AlwaysFallThrough_ReturnsNonVoid
=
614 diag::err_falloff_nonvoid_block
;
615 D
.diag_NeverFallThroughOrReturn
= 0;
620 static CheckFallThroughDiagnostics
MakeForLambda() {
621 CheckFallThroughDiagnostics D
;
622 D
.diag_MaybeFallThrough_HasNoReturn
=
623 diag::err_noreturn_lambda_has_return_expr
;
624 D
.diag_MaybeFallThrough_ReturnsNonVoid
=
625 diag::warn_maybe_falloff_nonvoid_lambda
;
626 D
.diag_AlwaysFallThrough_HasNoReturn
=
627 diag::err_noreturn_lambda_has_return_expr
;
628 D
.diag_AlwaysFallThrough_ReturnsNonVoid
=
629 diag::warn_falloff_nonvoid_lambda
;
630 D
.diag_NeverFallThroughOrReturn
= 0;
635 bool checkDiagnostics(DiagnosticsEngine
&D
, bool ReturnsVoid
,
636 bool HasNoReturn
) const {
637 if (funMode
== Function
) {
638 return (ReturnsVoid
||
639 D
.isIgnored(diag::warn_maybe_falloff_nonvoid_function
,
642 D
.isIgnored(diag::warn_noreturn_function_has_return_expr
,
645 D
.isIgnored(diag::warn_suggest_noreturn_block
, FuncLoc
));
647 if (funMode
== Coroutine
) {
648 return (ReturnsVoid
||
649 D
.isIgnored(diag::warn_maybe_falloff_nonvoid_function
, FuncLoc
) ||
650 D
.isIgnored(diag::warn_maybe_falloff_nonvoid_coroutine
,
654 // For blocks / lambdas.
655 return ReturnsVoid
&& !HasNoReturn
;
659 } // anonymous namespace
661 /// CheckFallThroughForBody - Check that we don't fall off the end of a
662 /// function that should return a value. Check that we don't fall off the end
663 /// of a noreturn function. We assume that functions and blocks not marked
664 /// noreturn will return.
665 static void CheckFallThroughForBody(Sema
&S
, const Decl
*D
, const Stmt
*Body
,
667 const CheckFallThroughDiagnostics
&CD
,
668 AnalysisDeclContext
&AC
,
669 sema::FunctionScopeInfo
*FSI
) {
671 bool ReturnsVoid
= false;
672 bool HasNoReturn
= false;
673 bool IsCoroutine
= FSI
->isCoroutine();
675 if (const auto *FD
= dyn_cast
<FunctionDecl
>(D
)) {
676 if (const auto *CBody
= dyn_cast
<CoroutineBodyStmt
>(Body
))
677 ReturnsVoid
= CBody
->getFallthroughHandler() != nullptr;
679 ReturnsVoid
= FD
->getReturnType()->isVoidType();
680 HasNoReturn
= FD
->isNoReturn();
682 else if (const auto *MD
= dyn_cast
<ObjCMethodDecl
>(D
)) {
683 ReturnsVoid
= MD
->getReturnType()->isVoidType();
684 HasNoReturn
= MD
->hasAttr
<NoReturnAttr
>();
686 else if (isa
<BlockDecl
>(D
)) {
687 if (const FunctionType
*FT
=
688 BlockType
->getPointeeType()->getAs
<FunctionType
>()) {
689 if (FT
->getReturnType()->isVoidType())
691 if (FT
->getNoReturnAttr())
696 DiagnosticsEngine
&Diags
= S
.getDiagnostics();
698 // Short circuit for compilation speed.
699 if (CD
.checkDiagnostics(Diags
, ReturnsVoid
, HasNoReturn
))
701 SourceLocation LBrace
= Body
->getBeginLoc(), RBrace
= Body
->getEndLoc();
702 auto EmitDiag
= [&](SourceLocation Loc
, unsigned DiagID
) {
704 S
.Diag(Loc
, DiagID
) << FSI
->CoroutinePromise
->getType();
709 // cpu_dispatch functions permit empty function bodies for ICC compatibility.
710 if (D
->getAsFunction() && D
->getAsFunction()->isCPUDispatchMultiVersion())
713 // Either in a function body compound statement, or a function-try-block.
714 switch (CheckFallThrough(AC
)) {
715 case UnknownFallThrough
:
718 case MaybeFallThrough
:
720 EmitDiag(RBrace
, CD
.diag_MaybeFallThrough_HasNoReturn
);
721 else if (!ReturnsVoid
)
722 EmitDiag(RBrace
, CD
.diag_MaybeFallThrough_ReturnsNonVoid
);
724 case AlwaysFallThrough
:
726 EmitDiag(RBrace
, CD
.diag_AlwaysFallThrough_HasNoReturn
);
727 else if (!ReturnsVoid
)
728 EmitDiag(RBrace
, CD
.diag_AlwaysFallThrough_ReturnsNonVoid
);
730 case NeverFallThroughOrReturn
:
731 if (ReturnsVoid
&& !HasNoReturn
&& CD
.diag_NeverFallThroughOrReturn
) {
732 if (const FunctionDecl
*FD
= dyn_cast
<FunctionDecl
>(D
)) {
733 S
.Diag(LBrace
, CD
.diag_NeverFallThroughOrReturn
) << 0 << FD
;
734 } else if (const ObjCMethodDecl
*MD
= dyn_cast
<ObjCMethodDecl
>(D
)) {
735 S
.Diag(LBrace
, CD
.diag_NeverFallThroughOrReturn
) << 1 << MD
;
737 S
.Diag(LBrace
, CD
.diag_NeverFallThroughOrReturn
);
741 case NeverFallThrough
:
746 //===----------------------------------------------------------------------===//
748 //===----------------------------------------------------------------------===//
751 /// ContainsReference - A visitor class to search for references to
752 /// a particular declaration (the needle) within any evaluated component of an
753 /// expression (recursively).
754 class ContainsReference
: public ConstEvaluatedExprVisitor
<ContainsReference
> {
756 const DeclRefExpr
*Needle
;
759 typedef ConstEvaluatedExprVisitor
<ContainsReference
> Inherited
;
761 ContainsReference(ASTContext
&Context
, const DeclRefExpr
*Needle
)
762 : Inherited(Context
), FoundReference(false), Needle(Needle
) {}
764 void VisitExpr(const Expr
*E
) {
765 // Stop evaluating if we already have a reference.
769 Inherited::VisitExpr(E
);
772 void VisitDeclRefExpr(const DeclRefExpr
*E
) {
774 FoundReference
= true;
776 Inherited::VisitDeclRefExpr(E
);
779 bool doesContainReference() const { return FoundReference
; }
781 } // anonymous namespace
783 static bool SuggestInitializationFixit(Sema
&S
, const VarDecl
*VD
) {
784 QualType VariableTy
= VD
->getType().getCanonicalType();
785 if (VariableTy
->isBlockPointerType() &&
786 !VD
->hasAttr
<BlocksAttr
>()) {
787 S
.Diag(VD
->getLocation(), diag::note_block_var_fixit_add_initialization
)
789 << FixItHint::CreateInsertion(VD
->getLocation(), "__block ");
793 // Don't issue a fixit if there is already an initializer.
797 // Don't suggest a fixit inside macros.
798 if (VD
->getEndLoc().isMacroID())
801 SourceLocation Loc
= S
.getLocForEndOfToken(VD
->getEndLoc());
803 // Suggest possible initialization (if any).
804 std::string Init
= S
.getFixItZeroInitializerForType(VariableTy
, Loc
);
808 S
.Diag(Loc
, diag::note_var_fixit_add_initialization
) << VD
->getDeclName()
809 << FixItHint::CreateInsertion(Loc
, Init
);
813 /// Create a fixit to remove an if-like statement, on the assumption that its
814 /// condition is CondVal.
815 static void CreateIfFixit(Sema
&S
, const Stmt
*If
, const Stmt
*Then
,
816 const Stmt
*Else
, bool CondVal
,
817 FixItHint
&Fixit1
, FixItHint
&Fixit2
) {
819 // If condition is always true, remove all but the 'then'.
820 Fixit1
= FixItHint::CreateRemoval(
821 CharSourceRange::getCharRange(If
->getBeginLoc(), Then
->getBeginLoc()));
823 SourceLocation ElseKwLoc
= S
.getLocForEndOfToken(Then
->getEndLoc());
825 FixItHint::CreateRemoval(SourceRange(ElseKwLoc
, Else
->getEndLoc()));
828 // If condition is always false, remove all but the 'else'.
830 Fixit1
= FixItHint::CreateRemoval(CharSourceRange::getCharRange(
831 If
->getBeginLoc(), Else
->getBeginLoc()));
833 Fixit1
= FixItHint::CreateRemoval(If
->getSourceRange());
/// DiagUninitUse -- Helper function to produce a diagnostic for an
/// uninitialized use of a variable.
static void DiagUninitUse(Sema &S, const VarDecl *VD, const UninitUse &Use,
                          bool IsCapturedByBlock) {
  bool Diagnosed = false;

  switch (Use.getKind()) {
  case UninitUse::Always:
    S.Diag(Use.getUser()->getBeginLoc(), diag::warn_uninit_var)
        << VD->getDeclName() << IsCapturedByBlock
        << Use.getUser()->getSourceRange();
    return;

  case UninitUse::AfterDecl:
  case UninitUse::AfterCall:
    S.Diag(VD->getLocation(), diag::warn_sometimes_uninit_var)
      << VD->getDeclName() << IsCapturedByBlock
      << (Use.getKind() == UninitUse::AfterDecl ? 4 : 5)
      << const_cast<DeclContext*>(VD->getLexicalDeclContext())
      << VD->getSourceRange();
    S.Diag(Use.getUser()->getBeginLoc(), diag::note_uninit_var_use)
      << IsCapturedByBlock << Use.getUser()->getSourceRange();
    return;

  case UninitUse::Maybe:
  case UninitUse::Sometimes:
    // Carry on to report sometimes-uninitialized branches, if possible,
    // or a 'may be used uninitialized' diagnostic otherwise.
    break;
  }

  // Diagnose each branch which leads to a sometimes-uninitialized use.
  for (UninitUse::branch_iterator I = Use.branch_begin(), E = Use.branch_end();
       I != E; ++I) {
    assert(Use.getKind() == UninitUse::Sometimes);

    const Expr *User = Use.getUser();
    const Stmt *Term = I->Terminator;

    // Information used when building the diagnostic.
    unsigned DiagKind;
    StringRef Str;
    SourceRange Range;

    // FixIts to suppress the diagnostic by removing the dead condition.
    // For all binary terminators, branch 0 is taken if the condition is true,
    // and branch 1 is taken if the condition is false.
    int RemoveDiagKind = -1;
    const char *FixitStr =
        S.getLangOpts().CPlusPlus ? (I->Output ? "true" : "false")
                                  : (I->Output ? "1" : "0");
    FixItHint Fixit1, Fixit2;

    switch (Term ? Term->getStmtClass() : Stmt::DeclStmtClass) {
    default:
      // Don't know how to report this. Just fall back to 'may be used
      // uninitialized'. FIXME: Can this happen?
      continue;

    // "condition is true / condition is false".
    case Stmt::IfStmtClass: {
      const IfStmt *IS = cast<IfStmt>(Term);
      DiagKind = 0;
      Str = "if";
      Range = IS->getCond()->getSourceRange();
      RemoveDiagKind = 0;
      CreateIfFixit(S, IS, IS->getThen(), IS->getElse(),
                    I->Output, Fixit1, Fixit2);
      break;
    }
    case Stmt::ConditionalOperatorClass: {
      const ConditionalOperator *CO = cast<ConditionalOperator>(Term);
      DiagKind = 0;
      Str = "?:";
      Range = CO->getCond()->getSourceRange();
      RemoveDiagKind = 0;
      CreateIfFixit(S, CO, CO->getTrueExpr(), CO->getFalseExpr(),
                    I->Output, Fixit1, Fixit2);
      break;
    }
    case Stmt::BinaryOperatorClass: {
      const BinaryOperator *BO = cast<BinaryOperator>(Term);
      if (!BO->isLogicalOp())
        continue;
      DiagKind = 0;
      Str = BO->getOpcodeStr();
      Range = BO->getLHS()->getSourceRange();
      RemoveDiagKind = 0;
      if ((BO->getOpcode() == BO_LAnd && I->Output) ||
          (BO->getOpcode() == BO_LOr && !I->Output))
        // true && y -> y, false || y -> y.
        Fixit1 = FixItHint::CreateRemoval(
            SourceRange(BO->getBeginLoc(), BO->getOperatorLoc()));
      else
        // false && y -> false, true || y -> true.
        Fixit1 = FixItHint::CreateReplacement(BO->getSourceRange(), FixitStr);
      break;
    }

    // "loop is entered / loop is exited".
    case Stmt::WhileStmtClass:
      DiagKind = 1;
      Str = "while";
      Range = cast<WhileStmt>(Term)->getCond()->getSourceRange();
      RemoveDiagKind = 1;
      Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
      break;
    case Stmt::ForStmtClass:
      DiagKind = 1;
      Str = "for";
      Range = cast<ForStmt>(Term)->getCond()->getSourceRange();
      RemoveDiagKind = 1;
      if (I->Output)
        Fixit1 = FixItHint::CreateRemoval(Range);
      else
        Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
      break;
    case Stmt::CXXForRangeStmtClass:
      if (I->Output == 1) {
        // The use occurs if a range-based for loop's body never executes.
        // That may be impossible, and there's no syntactic fix for this,
        // so treat it as a 'may be uninitialized' case.
        continue;
      }
      DiagKind = 1;
      Str = "for";
      Range = cast<CXXForRangeStmt>(Term)->getRangeInit()->getSourceRange();
      break;

    // "condition is true / loop is exited".
    case Stmt::DoStmtClass:
      DiagKind = 2;
      Str = "do";
      Range = cast<DoStmt>(Term)->getCond()->getSourceRange();
      RemoveDiagKind = 1;
      Fixit1 = FixItHint::CreateReplacement(Range, FixitStr);
      break;

    // "switch case is taken".
    case Stmt::CaseStmtClass:
      DiagKind = 3;
      Str = "case";
      Range = cast<CaseStmt>(Term)->getLHS()->getSourceRange();
      break;
    case Stmt::DefaultStmtClass:
      DiagKind = 3;
      Str = "default";
      Range = cast<DefaultStmt>(Term)->getDefaultLoc();
      break;
    }

    S.Diag(Range.getBegin(), diag::warn_sometimes_uninit_var)
      << VD->getDeclName() << IsCapturedByBlock << DiagKind
      << Str << I->Output << Range;
    S.Diag(User->getBeginLoc(), diag::note_uninit_var_use)
      << IsCapturedByBlock << User->getSourceRange();
    if (RemoveDiagKind != -1)
      S.Diag(Fixit1.RemoveRange.getBegin(), diag::note_uninit_fixit_remove_cond)
        << RemoveDiagKind << Str << I->Output << Fixit1 << Fixit2;

    Diagnosed = true;
  }

  if (!Diagnosed)
    S.Diag(Use.getUser()->getBeginLoc(), diag::warn_maybe_uninit_var)
        << VD->getDeclName() << IsCapturedByBlock
        << Use.getUser()->getSourceRange();
}
1006 /// Diagnose uninitialized const reference usages.
1007 static bool DiagnoseUninitializedConstRefUse(Sema
&S
, const VarDecl
*VD
,
1008 const UninitUse
&Use
) {
1009 S
.Diag(Use
.getUser()->getBeginLoc(), diag::warn_uninit_const_reference
)
1010 << VD
->getDeclName() << Use
.getUser()->getSourceRange();
1014 /// DiagnoseUninitializedUse -- Helper function for diagnosing uses of an
1015 /// uninitialized variable. This manages the different forms of diagnostic
1016 /// emitted for particular types of uses. Returns true if the use was diagnosed
1017 /// as a warning. If a particular use is one we omit warnings for, returns
1019 static bool DiagnoseUninitializedUse(Sema
&S
, const VarDecl
*VD
,
1020 const UninitUse
&Use
,
1021 bool alwaysReportSelfInit
= false) {
1022 if (const DeclRefExpr
*DRE
= dyn_cast
<DeclRefExpr
>(Use
.getUser())) {
1023 // Inspect the initializer of the variable declaration which is
1024 // being referenced prior to its initialization. We emit
1025 // specialized diagnostics for self-initialization, and we
1026 // specifically avoid warning about self references which take the
1031 // This is used to indicate to GCC that 'x' is intentionally left
1032 // uninitialized. Proven code paths which access 'x' in
1033 // an uninitialized state after this will still warn.
1034 if (const Expr
*Initializer
= VD
->getInit()) {
1035 if (!alwaysReportSelfInit
&& DRE
== Initializer
->IgnoreParenImpCasts())
1038 ContainsReference
CR(S
.Context
, DRE
);
1039 CR
.Visit(Initializer
);
1040 if (CR
.doesContainReference()) {
1041 S
.Diag(DRE
->getBeginLoc(), diag::warn_uninit_self_reference_in_init
)
1042 << VD
->getDeclName() << VD
->getLocation() << DRE
->getSourceRange();
1047 DiagUninitUse(S
, VD
, Use
, false);
1049 const BlockExpr
*BE
= cast
<BlockExpr
>(Use
.getUser());
1050 if (VD
->getType()->isBlockPointerType() && !VD
->hasAttr
<BlocksAttr
>())
1051 S
.Diag(BE
->getBeginLoc(),
1052 diag::warn_uninit_byref_blockvar_captured_by_block
)
1053 << VD
->getDeclName()
1054 << VD
->getType().getQualifiers().hasObjCLifetime();
1056 DiagUninitUse(S
, VD
, Use
, true);
1059 // Report where the variable was declared when the use wasn't within
1060 // the initializer of that declaration & we didn't already suggest
1061 // an initialization fixit.
1062 if (!SuggestInitializationFixit(S
, VD
))
1063 S
.Diag(VD
->getBeginLoc(), diag::note_var_declared_here
)
1064 << VD
->getDeclName();
1070 class FallthroughMapper
: public RecursiveASTVisitor
<FallthroughMapper
> {
1072 FallthroughMapper(Sema
&S
)
1073 : FoundSwitchStatements(false),
1077 bool foundSwitchStatements() const { return FoundSwitchStatements
; }
1079 void markFallthroughVisited(const AttributedStmt
*Stmt
) {
1080 bool Found
= FallthroughStmts
.erase(Stmt
);
1085 typedef llvm::SmallPtrSet
<const AttributedStmt
*, 8> AttrStmts
;
1087 const AttrStmts
&getFallthroughStmts() const {
1088 return FallthroughStmts
;
1091 void fillReachableBlocks(CFG
*Cfg
) {
1092 assert(ReachableBlocks
.empty() && "ReachableBlocks already filled");
1093 std::deque
<const CFGBlock
*> BlockQueue
;
1095 ReachableBlocks
.insert(&Cfg
->getEntry());
1096 BlockQueue
.push_back(&Cfg
->getEntry());
1097 // Mark all case blocks reachable to avoid problems with switching on
1098 // constants, covered enums, etc.
1099 // These blocks can contain fall-through annotations, and we don't want to
1100 // issue a warn_fallthrough_attr_unreachable for them.
1101 for (const auto *B
: *Cfg
) {
1102 const Stmt
*L
= B
->getLabel();
1103 if (L
&& isa
<SwitchCase
>(L
) && ReachableBlocks
.insert(B
).second
)
1104 BlockQueue
.push_back(B
);
1107 while (!BlockQueue
.empty()) {
1108 const CFGBlock
*P
= BlockQueue
.front();
1109 BlockQueue
.pop_front();
1110 for (const CFGBlock
*B
: P
->succs()) {
1111 if (B
&& ReachableBlocks
.insert(B
).second
)
1112 BlockQueue
.push_back(B
);
1117 bool checkFallThroughIntoBlock(const CFGBlock
&B
, int &AnnotatedCnt
,
1118 bool IsTemplateInstantiation
) {
1119 assert(!ReachableBlocks
.empty() && "ReachableBlocks empty");
1121 int UnannotatedCnt
= 0;
1124 std::deque
<const CFGBlock
*> BlockQueue(B
.pred_begin(), B
.pred_end());
1125 while (!BlockQueue
.empty()) {
1126 const CFGBlock
*P
= BlockQueue
.front();
1127 BlockQueue
.pop_front();
1130 const Stmt
*Term
= P
->getTerminatorStmt();
1131 if (Term
&& isa
<SwitchStmt
>(Term
))
1132 continue; // Switch statement, good.
1134 const SwitchCase
*SW
= dyn_cast_or_null
<SwitchCase
>(P
->getLabel());
1135 if (SW
&& SW
->getSubStmt() == B
.getLabel() && P
->begin() == P
->end())
1136 continue; // Previous case label has no statements, good.
1138 const LabelStmt
*L
= dyn_cast_or_null
<LabelStmt
>(P
->getLabel());
1139 if (L
&& L
->getSubStmt() == B
.getLabel() && P
->begin() == P
->end())
1140 continue; // Case label is preceded with a normal label, good.
1142 if (!ReachableBlocks
.count(P
)) {
1143 for (const CFGElement
&Elem
: llvm::reverse(*P
)) {
1144 if (std::optional
<CFGStmt
> CS
= Elem
.getAs
<CFGStmt
>()) {
1145 if (const AttributedStmt
*AS
= asFallThroughAttr(CS
->getStmt())) {
1146 // Don't issue a warning for an unreachable fallthrough
1147 // attribute in template instantiations as it may not be
1148 // unreachable in all instantiations of the template.
1149 if (!IsTemplateInstantiation
)
1150 S
.Diag(AS
->getBeginLoc(),
1151 diag::warn_unreachable_fallthrough_attr
);
1152 markFallthroughVisited(AS
);
1156 // Don't care about other unreachable statements.
1159 // If there are no unreachable statements, this may be a special
1162 // A a; // A has a destructor.
1165 // // <<<< This place is represented by a 'hanging' CFG block.
1170 const Stmt
*LastStmt
= getLastStmt(*P
);
1171 if (const AttributedStmt
*AS
= asFallThroughAttr(LastStmt
)) {
1172 markFallthroughVisited(AS
);
1174 continue; // Fallthrough annotation, good.
1177 if (!LastStmt
) { // This block contains no executable statements.
1178 // Traverse its predecessors.
1179 std::copy(P
->pred_begin(), P
->pred_end(),
1180 std::back_inserter(BlockQueue
));
1186 return !!UnannotatedCnt
;
1189 // RecursiveASTVisitor setup.
1190 bool shouldWalkTypesOfTypeLocs() const { return false; }
1192 bool VisitAttributedStmt(AttributedStmt
*S
) {
1193 if (asFallThroughAttr(S
))
1194 FallthroughStmts
.insert(S
);
1198 bool VisitSwitchStmt(SwitchStmt
*S
) {
1199 FoundSwitchStatements
= true;
1203 // We don't want to traverse local type declarations. We analyze their
1204 // methods separately.
1205 bool TraverseDecl(Decl
*D
) { return true; }
1207 // We analyze lambda bodies separately. Skip them here.
1208 bool TraverseLambdaExpr(LambdaExpr
*LE
) {
1209 // Traverse the captures, but not the body.
1210 for (const auto C
: zip(LE
->captures(), LE
->capture_inits()))
1211 TraverseLambdaCapture(LE
, &std::get
<0>(C
), std::get
<1>(C
));
1217 static const AttributedStmt
*asFallThroughAttr(const Stmt
*S
) {
1218 if (const AttributedStmt
*AS
= dyn_cast_or_null
<AttributedStmt
>(S
)) {
1219 if (hasSpecificAttr
<FallThroughAttr
>(AS
->getAttrs()))
1225 static const Stmt
*getLastStmt(const CFGBlock
&B
) {
1226 if (const Stmt
*Term
= B
.getTerminatorStmt())
1228 for (const CFGElement
&Elem
: llvm::reverse(B
))
1229 if (std::optional
<CFGStmt
> CS
= Elem
.getAs
<CFGStmt
>())
1230 return CS
->getStmt();
1231 // Workaround to detect a statement thrown out by CFGBuilder:
1232 // case X: {} case Y:
1233 // case X: ; case Y:
1234 if (const SwitchCase
*SW
= dyn_cast_or_null
<SwitchCase
>(B
.getLabel()))
1235 if (!isa
<SwitchCase
>(SW
->getSubStmt()))
1236 return SW
->getSubStmt();
1241 bool FoundSwitchStatements
;
1242 AttrStmts FallthroughStmts
;
1244 llvm::SmallPtrSet
<const CFGBlock
*, 16> ReachableBlocks
;
1246 } // anonymous namespace
1248 static StringRef
getFallthroughAttrSpelling(Preprocessor
&PP
,
1249 SourceLocation Loc
) {
1250 TokenValue FallthroughTokens
[] = {
1251 tok::l_square
, tok::l_square
,
1252 PP
.getIdentifierInfo("fallthrough"),
1253 tok::r_square
, tok::r_square
1256 TokenValue ClangFallthroughTokens
[] = {
1257 tok::l_square
, tok::l_square
, PP
.getIdentifierInfo("clang"),
1258 tok::coloncolon
, PP
.getIdentifierInfo("fallthrough"),
1259 tok::r_square
, tok::r_square
1262 bool PreferClangAttr
= !PP
.getLangOpts().CPlusPlus17
&& !PP
.getLangOpts().C23
;
1264 StringRef MacroName
;
1265 if (PreferClangAttr
)
1266 MacroName
= PP
.getLastMacroWithSpelling(Loc
, ClangFallthroughTokens
);
1267 if (MacroName
.empty())
1268 MacroName
= PP
.getLastMacroWithSpelling(Loc
, FallthroughTokens
);
1269 if (MacroName
.empty() && !PreferClangAttr
)
1270 MacroName
= PP
.getLastMacroWithSpelling(Loc
, ClangFallthroughTokens
);
1271 if (MacroName
.empty()) {
1272 if (!PreferClangAttr
)
1273 MacroName
= "[[fallthrough]]";
1274 else if (PP
.getLangOpts().CPlusPlus
)
1275 MacroName
= "[[clang::fallthrough]]";
1277 MacroName
= "__attribute__((fallthrough))";
1282 static void DiagnoseSwitchLabelsFallthrough(Sema
&S
, AnalysisDeclContext
&AC
,
1284 FallthroughMapper
FM(S
);
1285 FM
.TraverseStmt(AC
.getBody());
1287 if (!FM
.foundSwitchStatements())
1290 if (PerFunction
&& FM
.getFallthroughStmts().empty())
1293 CFG
*Cfg
= AC
.getCFG();
1298 FM
.fillReachableBlocks(Cfg
);
1300 for (const CFGBlock
*B
: llvm::reverse(*Cfg
)) {
1301 const Stmt
*Label
= B
->getLabel();
1303 if (!isa_and_nonnull
<SwitchCase
>(Label
))
1308 bool IsTemplateInstantiation
= false;
1309 if (const FunctionDecl
*Function
= dyn_cast
<FunctionDecl
>(AC
.getDecl()))
1310 IsTemplateInstantiation
= Function
->isTemplateInstantiation();
1311 if (!FM
.checkFallThroughIntoBlock(*B
, AnnotatedCnt
,
1312 IsTemplateInstantiation
))
1315 S
.Diag(Label
->getBeginLoc(),
1316 PerFunction
? diag::warn_unannotated_fallthrough_per_function
1317 : diag::warn_unannotated_fallthrough
);
1319 if (!AnnotatedCnt
) {
1320 SourceLocation L
= Label
->getBeginLoc();
1324 const Stmt
*Term
= B
->getTerminatorStmt();
1325 // Skip empty cases.
1326 while (B
->empty() && !Term
&& B
->succ_size() == 1) {
1327 B
= *B
->succ_begin();
1328 Term
= B
->getTerminatorStmt();
1330 if (!(B
->empty() && Term
&& isa
<BreakStmt
>(Term
))) {
1331 Preprocessor
&PP
= S
.getPreprocessor();
1332 StringRef AnnotationSpelling
= getFallthroughAttrSpelling(PP
, L
);
1333 SmallString
<64> TextToInsert(AnnotationSpelling
);
1334 TextToInsert
+= "; ";
1335 S
.Diag(L
, diag::note_insert_fallthrough_fixit
)
1336 << AnnotationSpelling
1337 << FixItHint::CreateInsertion(L
, TextToInsert
);
1339 S
.Diag(L
, diag::note_insert_break_fixit
)
1340 << FixItHint::CreateInsertion(L
, "break; ");
1344 for (const auto *F
: FM
.getFallthroughStmts())
1345 S
.Diag(F
->getBeginLoc(), diag::err_fallthrough_attr_invalid_placement
);
1348 static bool isInLoop(const ASTContext
&Ctx
, const ParentMap
&PM
,
1353 switch (S
->getStmtClass()) {
1354 case Stmt::ForStmtClass
:
1355 case Stmt::WhileStmtClass
:
1356 case Stmt::CXXForRangeStmtClass
:
1357 case Stmt::ObjCForCollectionStmtClass
:
1359 case Stmt::DoStmtClass
: {
1360 Expr::EvalResult Result
;
1361 if (!cast
<DoStmt
>(S
)->getCond()->EvaluateAsInt(Result
, Ctx
))
1363 return Result
.Val
.getInt().getBoolValue();
1368 } while ((S
= PM
.getParent(S
)));
1373 static void diagnoseRepeatedUseOfWeak(Sema
&S
,
1374 const sema::FunctionScopeInfo
*CurFn
,
1376 const ParentMap
&PM
) {
1377 typedef sema::FunctionScopeInfo::WeakObjectProfileTy WeakObjectProfileTy
;
1378 typedef sema::FunctionScopeInfo::WeakObjectUseMap WeakObjectUseMap
;
1379 typedef sema::FunctionScopeInfo::WeakUseVector WeakUseVector
;
1380 typedef std::pair
<const Stmt
*, WeakObjectUseMap::const_iterator
>
1383 ASTContext
&Ctx
= S
.getASTContext();
1385 const WeakObjectUseMap
&WeakMap
= CurFn
->getWeakObjectUses();
1387 // Extract all weak objects that are referenced more than once.
1388 SmallVector
<StmtUsesPair
, 8> UsesByStmt
;
1389 for (WeakObjectUseMap::const_iterator I
= WeakMap
.begin(), E
= WeakMap
.end();
1391 const WeakUseVector
&Uses
= I
->second
;
1393 // Find the first read of the weak object.
1394 WeakUseVector::const_iterator UI
= Uses
.begin(), UE
= Uses
.end();
1395 for ( ; UI
!= UE
; ++UI
) {
1400 // If there were only writes to this object, don't warn.
1404 // If there was only one read, followed by any number of writes, and the
1405 // read is not within a loop, don't warn. Additionally, don't warn in a
1406 // loop if the base object is a local variable -- local variables are often
1407 // changed in loops.
1408 if (UI
== Uses
.begin()) {
1409 WeakUseVector::const_iterator UI2
= UI
;
1410 for (++UI2
; UI2
!= UE
; ++UI2
)
1411 if (UI2
->isUnsafe())
1415 if (!isInLoop(Ctx
, PM
, UI
->getUseExpr()))
1418 const WeakObjectProfileTy
&Profile
= I
->first
;
1419 if (!Profile
.isExactProfile())
1422 const NamedDecl
*Base
= Profile
.getBase();
1424 Base
= Profile
.getProperty();
1425 assert(Base
&& "A profile always has a base or property.");
1427 if (const VarDecl
*BaseVar
= dyn_cast
<VarDecl
>(Base
))
1428 if (BaseVar
->hasLocalStorage() && !isa
<ParmVarDecl
>(Base
))
1433 UsesByStmt
.push_back(StmtUsesPair(UI
->getUseExpr(), I
));
1436 if (UsesByStmt
.empty())
1439 // Sort by first use so that we emit the warnings in a deterministic order.
1440 SourceManager
&SM
= S
.getSourceManager();
1441 llvm::sort(UsesByStmt
,
1442 [&SM
](const StmtUsesPair
&LHS
, const StmtUsesPair
&RHS
) {
1443 return SM
.isBeforeInTranslationUnit(LHS
.first
->getBeginLoc(),
1444 RHS
.first
->getBeginLoc());
1447 // Classify the current code body for better warning text.
1448 // This enum should stay in sync with the cases in
1449 // warn_arc_repeated_use_of_weak and warn_arc_possible_repeated_use_of_weak.
1450 // FIXME: Should we use a common classification enum and the same set of
1451 // possibilities all throughout Sema?
1459 if (isa
<sema::BlockScopeInfo
>(CurFn
))
1460 FunctionKind
= Block
;
1461 else if (isa
<sema::LambdaScopeInfo
>(CurFn
))
1462 FunctionKind
= Lambda
;
1463 else if (isa
<ObjCMethodDecl
>(D
))
1464 FunctionKind
= Method
;
1466 FunctionKind
= Function
;
1468 // Iterate through the sorted problems and emit warnings for each.
1469 for (const auto &P
: UsesByStmt
) {
1470 const Stmt
*FirstRead
= P
.first
;
1471 const WeakObjectProfileTy
&Key
= P
.second
->first
;
1472 const WeakUseVector
&Uses
= P
.second
->second
;
1474 // For complicated expressions like 'a.b.c' and 'x.b.c', WeakObjectProfileTy
1475 // may not contain enough information to determine that these are different
1476 // properties. We can only be 100% sure of a repeated use in certain cases,
1477 // and we adjust the diagnostic kind accordingly so that the less certain
1478 // case can be turned off if it is too noisy.
1480 if (Key
.isExactProfile())
1481 DiagKind
= diag::warn_arc_repeated_use_of_weak
;
1483 DiagKind
= diag::warn_arc_possible_repeated_use_of_weak
;
1485 // Classify the weak object being accessed for better warning text.
1486 // This enum should stay in sync with the cases in
1487 // warn_arc_repeated_use_of_weak and warn_arc_possible_repeated_use_of_weak.
1495 const NamedDecl
*KeyProp
= Key
.getProperty();
1496 if (isa
<VarDecl
>(KeyProp
))
1497 ObjectKind
= Variable
;
1498 else if (isa
<ObjCPropertyDecl
>(KeyProp
))
1499 ObjectKind
= Property
;
1500 else if (isa
<ObjCMethodDecl
>(KeyProp
))
1501 ObjectKind
= ImplicitProperty
;
1502 else if (isa
<ObjCIvarDecl
>(KeyProp
))
1505 llvm_unreachable("Unexpected weak object kind!");
1507 // Do not warn about IBOutlet weak property receivers being set to null
1508 // since they are typically only used from the main thread.
1509 if (const ObjCPropertyDecl
*Prop
= dyn_cast
<ObjCPropertyDecl
>(KeyProp
))
1510 if (Prop
->hasAttr
<IBOutletAttr
>())
1513 // Show the first time the object was read.
1514 S
.Diag(FirstRead
->getBeginLoc(), DiagKind
)
1515 << int(ObjectKind
) << KeyProp
<< int(FunctionKind
)
1516 << FirstRead
->getSourceRange();
1518 // Print all the other accesses as notes.
1519 for (const auto &Use
: Uses
) {
1520 if (Use
.getUseExpr() == FirstRead
)
1522 S
.Diag(Use
.getUseExpr()->getBeginLoc(),
1523 diag::note_arc_weak_also_accessed_here
)
1524 << Use
.getUseExpr()->getSourceRange();
1531 typedef SmallVector
<PartialDiagnosticAt
, 1> OptionalNotes
;
1532 typedef std::pair
<PartialDiagnosticAt
, OptionalNotes
> DelayedDiag
;
1533 typedef std::list
<DelayedDiag
> DiagList
;
1535 struct SortDiagBySourceLocation
{
1537 SortDiagBySourceLocation(SourceManager
&SM
) : SM(SM
) {}
1539 bool operator()(const DelayedDiag
&left
, const DelayedDiag
&right
) {
1540 // Although this call will be slow, this is only called when outputting
1541 // multiple warnings.
1542 return SM
.isBeforeInTranslationUnit(left
.first
.first
, right
.first
.first
);
1545 } // anonymous namespace
1546 } // namespace clang
1549 class UninitValsDiagReporter
: public UninitVariablesHandler
{
1551 typedef SmallVector
<UninitUse
, 2> UsesVec
;
1552 typedef llvm::PointerIntPair
<UsesVec
*, 1, bool> MappedType
;
1553 // Prefer using MapVector to DenseMap, so that iteration order will be
1554 // the same as insertion order. This is needed to obtain a deterministic
1555 // order of diagnostics when calling flushDiagnostics().
1556 typedef llvm::MapVector
<const VarDecl
*, MappedType
> UsesMap
;
1558 UsesMap constRefUses
;
1561 UninitValsDiagReporter(Sema
&S
) : S(S
) {}
1562 ~UninitValsDiagReporter() override
{ flushDiagnostics(); }
1564 MappedType
&getUses(UsesMap
&um
, const VarDecl
*vd
) {
1565 MappedType
&V
= um
[vd
];
1566 if (!V
.getPointer())
1567 V
.setPointer(new UsesVec());
1571 void handleUseOfUninitVariable(const VarDecl
*vd
,
1572 const UninitUse
&use
) override
{
1573 getUses(uses
, vd
).getPointer()->push_back(use
);
1576 void handleConstRefUseOfUninitVariable(const VarDecl
*vd
,
1577 const UninitUse
&use
) override
{
1578 getUses(constRefUses
, vd
).getPointer()->push_back(use
);
1581 void handleSelfInit(const VarDecl
*vd
) override
{
1582 getUses(uses
, vd
).setInt(true);
1583 getUses(constRefUses
, vd
).setInt(true);
1586 void flushDiagnostics() {
1587 for (const auto &P
: uses
) {
1588 const VarDecl
*vd
= P
.first
;
1589 const MappedType
&V
= P
.second
;
1591 UsesVec
*vec
= V
.getPointer();
1592 bool hasSelfInit
= V
.getInt();
1594 // Specially handle the case where we have uses of an uninitialized
1595 // variable, but the root cause is an idiomatic self-init. We want
1596 // to report the diagnostic at the self-init since that is the root cause.
1597 if (!vec
->empty() && hasSelfInit
&& hasAlwaysUninitializedUse(vec
))
1598 DiagnoseUninitializedUse(S
, vd
,
1599 UninitUse(vd
->getInit()->IgnoreParenCasts(),
1600 /* isAlwaysUninit */ true),
1601 /* alwaysReportSelfInit */ true);
1603 // Sort the uses by their SourceLocations. While not strictly
1604 // guaranteed to produce them in line/column order, this will provide
1605 // a stable ordering.
1606 llvm::sort(*vec
, [](const UninitUse
&a
, const UninitUse
&b
) {
1607 // Prefer a more confident report over a less confident one.
1608 if (a
.getKind() != b
.getKind())
1609 return a
.getKind() > b
.getKind();
1610 return a
.getUser()->getBeginLoc() < b
.getUser()->getBeginLoc();
1613 for (const auto &U
: *vec
) {
1614 // If we have self-init, downgrade all uses to 'may be uninitialized'.
1615 UninitUse Use
= hasSelfInit
? UninitUse(U
.getUser(), false) : U
;
1617 if (DiagnoseUninitializedUse(S
, vd
, Use
))
1618 // Skip further diagnostics for this variable. We try to warn only
1619 // on the first point at which a variable is used uninitialized.
1624 // Release the uses vector.
1630 // Flush all const reference uses diags.
1631 for (const auto &P
: constRefUses
) {
1632 const VarDecl
*vd
= P
.first
;
1633 const MappedType
&V
= P
.second
;
1635 UsesVec
*vec
= V
.getPointer();
1636 bool hasSelfInit
= V
.getInt();
1638 if (!vec
->empty() && hasSelfInit
&& hasAlwaysUninitializedUse(vec
))
1639 DiagnoseUninitializedUse(S
, vd
,
1640 UninitUse(vd
->getInit()->IgnoreParenCasts(),
1641 /* isAlwaysUninit */ true),
1642 /* alwaysReportSelfInit */ true);
1644 for (const auto &U
: *vec
) {
1645 if (DiagnoseUninitializedConstRefUse(S
, vd
, U
))
1650 // Release the uses vector.
1654 constRefUses
.clear();
1658 static bool hasAlwaysUninitializedUse(const UsesVec
* vec
) {
1659 return llvm::any_of(*vec
, [](const UninitUse
&U
) {
1660 return U
.getKind() == UninitUse::Always
||
1661 U
.getKind() == UninitUse::AfterCall
||
1662 U
.getKind() == UninitUse::AfterDecl
;
1667 /// Inter-procedural data for the called-once checker.
1668 class CalledOnceInterProceduralData
{
1670 // Add the delayed warning for the given block.
1671 void addDelayedWarning(const BlockDecl
*Block
,
1672 PartialDiagnosticAt
&&Warning
) {
1673 DelayedBlockWarnings
[Block
].emplace_back(std::move(Warning
));
1675 // Report all of the warnings we've gathered for the given block.
1676 void flushWarnings(const BlockDecl
*Block
, Sema
&S
) {
1677 for (const PartialDiagnosticAt
&Delayed
: DelayedBlockWarnings
[Block
])
1678 S
.Diag(Delayed
.first
, Delayed
.second
);
1680 discardWarnings(Block
);
1682 // Discard all of the warnings we've gathered for the given block.
1683 void discardWarnings(const BlockDecl
*Block
) {
1684 DelayedBlockWarnings
.erase(Block
);
1688 using DelayedDiagnostics
= SmallVector
<PartialDiagnosticAt
, 2>;
1689 llvm::DenseMap
<const BlockDecl
*, DelayedDiagnostics
> DelayedBlockWarnings
;
1692 class CalledOnceCheckReporter
: public CalledOnceCheckHandler
{
1694 CalledOnceCheckReporter(Sema
&S
, CalledOnceInterProceduralData
&Data
)
1695 : S(S
), Data(Data
) {}
1696 void handleDoubleCall(const ParmVarDecl
*Parameter
, const Expr
*Call
,
1697 const Expr
*PrevCall
, bool IsCompletionHandler
,
1698 bool Poised
) override
{
1699 auto DiagToReport
= IsCompletionHandler
1700 ? diag::warn_completion_handler_called_twice
1701 : diag::warn_called_once_gets_called_twice
;
1702 S
.Diag(Call
->getBeginLoc(), DiagToReport
) << Parameter
;
1703 S
.Diag(PrevCall
->getBeginLoc(), diag::note_called_once_gets_called_twice
)
1707 void handleNeverCalled(const ParmVarDecl
*Parameter
,
1708 bool IsCompletionHandler
) override
{
1709 auto DiagToReport
= IsCompletionHandler
1710 ? diag::warn_completion_handler_never_called
1711 : diag::warn_called_once_never_called
;
1712 S
.Diag(Parameter
->getBeginLoc(), DiagToReport
)
1713 << Parameter
<< /* Captured */ false;
1716 void handleNeverCalled(const ParmVarDecl
*Parameter
, const Decl
*Function
,
1717 const Stmt
*Where
, NeverCalledReason Reason
,
1718 bool IsCalledDirectly
,
1719 bool IsCompletionHandler
) override
{
1720 auto DiagToReport
= IsCompletionHandler
1721 ? diag::warn_completion_handler_never_called_when
1722 : diag::warn_called_once_never_called_when
;
1723 PartialDiagnosticAt
Warning(Where
->getBeginLoc(), S
.PDiag(DiagToReport
)
1726 << (unsigned)Reason
);
1728 if (const auto *Block
= dyn_cast
<BlockDecl
>(Function
)) {
1729 // We shouldn't report these warnings on blocks immediately
1730 Data
.addDelayedWarning(Block
, std::move(Warning
));
1732 S
.Diag(Warning
.first
, Warning
.second
);
1736 void handleCapturedNeverCalled(const ParmVarDecl
*Parameter
,
1738 bool IsCompletionHandler
) override
{
1739 auto DiagToReport
= IsCompletionHandler
1740 ? diag::warn_completion_handler_never_called
1741 : diag::warn_called_once_never_called
;
1742 S
.Diag(Where
->getBeginLoc(), DiagToReport
)
1743 << Parameter
<< /* Captured */ true;
1747 handleBlockThatIsGuaranteedToBeCalledOnce(const BlockDecl
*Block
) override
{
1748 Data
.flushWarnings(Block
, S
);
1751 void handleBlockWithNoGuarantees(const BlockDecl
*Block
) override
{
1752 Data
.discardWarnings(Block
);
1757 CalledOnceInterProceduralData
&Data
;
1760 constexpr unsigned CalledOnceWarnings
[] = {
1761 diag::warn_called_once_never_called
,
1762 diag::warn_called_once_never_called_when
,
1763 diag::warn_called_once_gets_called_twice
};
1765 constexpr unsigned CompletionHandlerWarnings
[]{
1766 diag::warn_completion_handler_never_called
,
1767 diag::warn_completion_handler_never_called_when
,
1768 diag::warn_completion_handler_called_twice
};
1770 bool shouldAnalyzeCalledOnceImpl(llvm::ArrayRef
<unsigned> DiagIDs
,
1771 const DiagnosticsEngine
&Diags
,
1772 SourceLocation At
) {
1773 return llvm::any_of(DiagIDs
, [&Diags
, At
](unsigned DiagID
) {
1774 return !Diags
.isIgnored(DiagID
, At
);
1778 bool shouldAnalyzeCalledOnceConventions(const DiagnosticsEngine
&Diags
,
1779 SourceLocation At
) {
1780 return shouldAnalyzeCalledOnceImpl(CompletionHandlerWarnings
, Diags
, At
);
1783 bool shouldAnalyzeCalledOnceParameters(const DiagnosticsEngine
&Diags
,
1784 SourceLocation At
) {
1785 return shouldAnalyzeCalledOnceImpl(CalledOnceWarnings
, Diags
, At
) ||
1786 shouldAnalyzeCalledOnceConventions(Diags
, At
);
1788 } // anonymous namespace
1790 //===----------------------------------------------------------------------===//
1792 //===----------------------------------------------------------------------===//
1794 namespace threadSafety
{
1796 class ThreadSafetyReporter
: public clang::threadSafety::ThreadSafetyHandler
{
1799 SourceLocation FunLocation
, FunEndLocation
;
1801 const FunctionDecl
*CurrentFunction
;
1804 OptionalNotes
getNotes() const {
1805 if (Verbose
&& CurrentFunction
) {
1806 PartialDiagnosticAt
FNote(CurrentFunction
->getBody()->getBeginLoc(),
1807 S
.PDiag(diag::note_thread_warning_in_fun
)
1808 << CurrentFunction
);
1809 return OptionalNotes(1, FNote
);
1811 return OptionalNotes();
1814 OptionalNotes
getNotes(const PartialDiagnosticAt
&Note
) const {
1815 OptionalNotes
ONS(1, Note
);
1816 if (Verbose
&& CurrentFunction
) {
1817 PartialDiagnosticAt
FNote(CurrentFunction
->getBody()->getBeginLoc(),
1818 S
.PDiag(diag::note_thread_warning_in_fun
)
1819 << CurrentFunction
);
1820 ONS
.push_back(std::move(FNote
));
1825 OptionalNotes
getNotes(const PartialDiagnosticAt
&Note1
,
1826 const PartialDiagnosticAt
&Note2
) const {
1828 ONS
.push_back(Note1
);
1829 ONS
.push_back(Note2
);
1830 if (Verbose
&& CurrentFunction
) {
1831 PartialDiagnosticAt
FNote(CurrentFunction
->getBody()->getBeginLoc(),
1832 S
.PDiag(diag::note_thread_warning_in_fun
)
1833 << CurrentFunction
);
1834 ONS
.push_back(std::move(FNote
));
1839 OptionalNotes
makeLockedHereNote(SourceLocation LocLocked
, StringRef Kind
) {
1840 return LocLocked
.isValid()
1841 ? getNotes(PartialDiagnosticAt(
1842 LocLocked
, S
.PDiag(diag::note_locked_here
) << Kind
))
1846 OptionalNotes
makeUnlockedHereNote(SourceLocation LocUnlocked
,
1848 return LocUnlocked
.isValid()
1849 ? getNotes(PartialDiagnosticAt(
1850 LocUnlocked
, S
.PDiag(diag::note_unlocked_here
) << Kind
))
1855 ThreadSafetyReporter(Sema
&S
, SourceLocation FL
, SourceLocation FEL
)
1856 : S(S
), FunLocation(FL
), FunEndLocation(FEL
),
1857 CurrentFunction(nullptr), Verbose(false) {}
1859 void setVerbose(bool b
) { Verbose
= b
; }
1861 /// Emit all buffered diagnostics in order of sourcelocation.
1862 /// We need to output diagnostics produced while iterating through
1863 /// the lockset in deterministic order, so this function orders diagnostics
1864 /// and outputs them.
1865 void emitDiagnostics() {
1866 Warnings
.sort(SortDiagBySourceLocation(S
.getSourceManager()));
1867 for (const auto &Diag
: Warnings
) {
1868 S
.Diag(Diag
.first
.first
, Diag
.first
.second
);
1869 for (const auto &Note
: Diag
.second
)
1870 S
.Diag(Note
.first
, Note
.second
);
1874 void handleInvalidLockExp(SourceLocation Loc
) override
{
1875 PartialDiagnosticAt
Warning(Loc
, S
.PDiag(diag::warn_cannot_resolve_lock
)
1877 Warnings
.emplace_back(std::move(Warning
), getNotes());
1880 void handleUnmatchedUnlock(StringRef Kind
, Name LockName
, SourceLocation Loc
,
1881 SourceLocation LocPreviousUnlock
) override
{
1882 if (Loc
.isInvalid())
1884 PartialDiagnosticAt
Warning(Loc
, S
.PDiag(diag::warn_unlock_but_no_lock
)
1885 << Kind
<< LockName
);
1886 Warnings
.emplace_back(std::move(Warning
),
1887 makeUnlockedHereNote(LocPreviousUnlock
, Kind
));
1890 void handleIncorrectUnlockKind(StringRef Kind
, Name LockName
,
1891 LockKind Expected
, LockKind Received
,
1892 SourceLocation LocLocked
,
1893 SourceLocation LocUnlock
) override
{
1894 if (LocUnlock
.isInvalid())
1895 LocUnlock
= FunLocation
;
1896 PartialDiagnosticAt
Warning(
1897 LocUnlock
, S
.PDiag(diag::warn_unlock_kind_mismatch
)
1898 << Kind
<< LockName
<< Received
<< Expected
);
1899 Warnings
.emplace_back(std::move(Warning
),
1900 makeLockedHereNote(LocLocked
, Kind
));
1903 void handleDoubleLock(StringRef Kind
, Name LockName
, SourceLocation LocLocked
,
1904 SourceLocation LocDoubleLock
) override
{
1905 if (LocDoubleLock
.isInvalid())
1906 LocDoubleLock
= FunLocation
;
1907 PartialDiagnosticAt
Warning(LocDoubleLock
, S
.PDiag(diag::warn_double_lock
)
1908 << Kind
<< LockName
);
1909 Warnings
.emplace_back(std::move(Warning
),
1910 makeLockedHereNote(LocLocked
, Kind
));
1913 void handleMutexHeldEndOfScope(StringRef Kind
, Name LockName
,
1914 SourceLocation LocLocked
,
1915 SourceLocation LocEndOfScope
,
1916 LockErrorKind LEK
) override
{
1917 unsigned DiagID
= 0;
1919 case LEK_LockedSomePredecessors
:
1920 DiagID
= diag::warn_lock_some_predecessors
;
1922 case LEK_LockedSomeLoopIterations
:
1923 DiagID
= diag::warn_expecting_lock_held_on_loop
;
1925 case LEK_LockedAtEndOfFunction
:
1926 DiagID
= diag::warn_no_unlock
;
1928 case LEK_NotLockedAtEndOfFunction
:
1929 DiagID
= diag::warn_expecting_locked
;
1932 if (LocEndOfScope
.isInvalid())
1933 LocEndOfScope
= FunEndLocation
;
1935 PartialDiagnosticAt
Warning(LocEndOfScope
, S
.PDiag(DiagID
) << Kind
1937 Warnings
.emplace_back(std::move(Warning
),
1938 makeLockedHereNote(LocLocked
, Kind
));
1941 void handleExclusiveAndShared(StringRef Kind
, Name LockName
,
1942 SourceLocation Loc1
,
1943 SourceLocation Loc2
) override
{
1944 PartialDiagnosticAt
Warning(Loc1
,
1945 S
.PDiag(diag::warn_lock_exclusive_and_shared
)
1946 << Kind
<< LockName
);
1947 PartialDiagnosticAt
Note(Loc2
, S
.PDiag(diag::note_lock_exclusive_and_shared
)
1948 << Kind
<< LockName
);
1949 Warnings
.emplace_back(std::move(Warning
), getNotes(Note
));
1952 void handleNoMutexHeld(const NamedDecl
*D
, ProtectedOperationKind POK
,
1953 AccessKind AK
, SourceLocation Loc
) override
{
1954 assert((POK
== POK_VarAccess
|| POK
== POK_VarDereference
) &&
1955 "Only works for variables");
1956 unsigned DiagID
= POK
== POK_VarAccess
?
1957 diag::warn_variable_requires_any_lock
:
1958 diag::warn_var_deref_requires_any_lock
;
1959 PartialDiagnosticAt
Warning(Loc
, S
.PDiag(DiagID
)
1960 << D
<< getLockKindFromAccessKind(AK
));
1961 Warnings
.emplace_back(std::move(Warning
), getNotes());
1964 void handleMutexNotHeld(StringRef Kind
, const NamedDecl
*D
,
1965 ProtectedOperationKind POK
, Name LockName
,
1966 LockKind LK
, SourceLocation Loc
,
1967 Name
*PossibleMatch
) override
{
1968 unsigned DiagID
= 0;
1969 if (PossibleMatch
) {
1972 DiagID
= diag::warn_variable_requires_lock_precise
;
1974 case POK_VarDereference
:
1975 DiagID
= diag::warn_var_deref_requires_lock_precise
;
1977 case POK_FunctionCall
:
1978 DiagID
= diag::warn_fun_requires_lock_precise
;
1981 DiagID
= diag::warn_guarded_pass_by_reference
;
1983 case POK_PtPassByRef
:
1984 DiagID
= diag::warn_pt_guarded_pass_by_reference
;
1986 case POK_ReturnByRef
:
1987 DiagID
= diag::warn_guarded_return_by_reference
;
1989 case POK_PtReturnByRef
:
1990 DiagID
= diag::warn_pt_guarded_return_by_reference
;
1993 PartialDiagnosticAt
Warning(Loc
, S
.PDiag(DiagID
) << Kind
1996 PartialDiagnosticAt
Note(Loc
, S
.PDiag(diag::note_found_mutex_near_match
)
1998 if (Verbose
&& POK
== POK_VarAccess
) {
1999 PartialDiagnosticAt
VNote(D
->getLocation(),
2000 S
.PDiag(diag::note_guarded_by_declared_here
)
2001 << D
->getDeclName());
2002 Warnings
.emplace_back(std::move(Warning
), getNotes(Note
, VNote
));
2004 Warnings
.emplace_back(std::move(Warning
), getNotes(Note
));
2008 DiagID
= diag::warn_variable_requires_lock
;
2010 case POK_VarDereference
:
2011 DiagID
= diag::warn_var_deref_requires_lock
;
2013 case POK_FunctionCall
:
2014 DiagID
= diag::warn_fun_requires_lock
;
2017 DiagID
= diag::warn_guarded_pass_by_reference
;
2019 case POK_PtPassByRef
:
2020 DiagID
= diag::warn_pt_guarded_pass_by_reference
;
2022 case POK_ReturnByRef
:
2023 DiagID
= diag::warn_guarded_return_by_reference
;
2025 case POK_PtReturnByRef
:
2026 DiagID
= diag::warn_pt_guarded_return_by_reference
;
2029 PartialDiagnosticAt
Warning(Loc
, S
.PDiag(DiagID
) << Kind
2032 if (Verbose
&& POK
== POK_VarAccess
) {
2033 PartialDiagnosticAt
Note(D
->getLocation(),
2034 S
.PDiag(diag::note_guarded_by_declared_here
));
2035 Warnings
.emplace_back(std::move(Warning
), getNotes(Note
));
2037 Warnings
.emplace_back(std::move(Warning
), getNotes());
2041 void handleNegativeNotHeld(StringRef Kind
, Name LockName
, Name Neg
,
2042 SourceLocation Loc
) override
{
2043 PartialDiagnosticAt
Warning(Loc
,
2044 S
.PDiag(diag::warn_acquire_requires_negative_cap
)
2045 << Kind
<< LockName
<< Neg
);
2046 Warnings
.emplace_back(std::move(Warning
), getNotes());
2049 void handleNegativeNotHeld(const NamedDecl
*D
, Name LockName
,
2050 SourceLocation Loc
) override
{
2051 PartialDiagnosticAt
Warning(
2052 Loc
, S
.PDiag(diag::warn_fun_requires_negative_cap
) << D
<< LockName
);
2053 Warnings
.emplace_back(std::move(Warning
), getNotes());
2056 void handleFunExcludesLock(StringRef Kind
, Name FunName
, Name LockName
,
2057 SourceLocation Loc
) override
{
2058 PartialDiagnosticAt
Warning(Loc
, S
.PDiag(diag::warn_fun_excludes_mutex
)
2059 << Kind
<< FunName
<< LockName
);
2060 Warnings
.emplace_back(std::move(Warning
), getNotes());
2063 void handleLockAcquiredBefore(StringRef Kind
, Name L1Name
, Name L2Name
,
2064 SourceLocation Loc
) override
{
2065 PartialDiagnosticAt
Warning(Loc
,
2066 S
.PDiag(diag::warn_acquired_before
) << Kind
<< L1Name
<< L2Name
);
2067 Warnings
.emplace_back(std::move(Warning
), getNotes());
2070 void handleBeforeAfterCycle(Name L1Name
, SourceLocation Loc
) override
{
2071 PartialDiagnosticAt
Warning(Loc
,
2072 S
.PDiag(diag::warn_acquired_before_after_cycle
) << L1Name
);
2073 Warnings
.emplace_back(std::move(Warning
), getNotes());
2076 void enterFunction(const FunctionDecl
* FD
) override
{
2077 CurrentFunction
= FD
;
2080 void leaveFunction(const FunctionDecl
* FD
) override
{
2081 CurrentFunction
= nullptr;
2084 } // anonymous namespace
2085 } // namespace threadSafety
2086 } // namespace clang
2088 //===----------------------------------------------------------------------===//
2090 //===----------------------------------------------------------------------===//
2093 namespace consumed
{
2095 class ConsumedWarningsHandler
: public ConsumedWarningsHandlerBase
{
2102 ConsumedWarningsHandler(Sema
&S
) : S(S
) {}
2104 void emitDiagnostics() override
{
2105 Warnings
.sort(SortDiagBySourceLocation(S
.getSourceManager()));
2106 for (const auto &Diag
: Warnings
) {
2107 S
.Diag(Diag
.first
.first
, Diag
.first
.second
);
2108 for (const auto &Note
: Diag
.second
)
2109 S
.Diag(Note
.first
, Note
.second
);
2113 void warnLoopStateMismatch(SourceLocation Loc
,
2114 StringRef VariableName
) override
{
2115 PartialDiagnosticAt
Warning(Loc
, S
.PDiag(diag::warn_loop_state_mismatch
) <<
2118 Warnings
.emplace_back(std::move(Warning
), OptionalNotes());
2121 void warnParamReturnTypestateMismatch(SourceLocation Loc
,
2122 StringRef VariableName
,
2123 StringRef ExpectedState
,
2124 StringRef ObservedState
) override
{
2126 PartialDiagnosticAt
Warning(Loc
, S
.PDiag(
2127 diag::warn_param_return_typestate_mismatch
) << VariableName
<<
2128 ExpectedState
<< ObservedState
);
2130 Warnings
.emplace_back(std::move(Warning
), OptionalNotes());
2133 void warnParamTypestateMismatch(SourceLocation Loc
, StringRef ExpectedState
,
2134 StringRef ObservedState
) override
{
2136 PartialDiagnosticAt
Warning(Loc
, S
.PDiag(
2137 diag::warn_param_typestate_mismatch
) << ExpectedState
<< ObservedState
);
2139 Warnings
.emplace_back(std::move(Warning
), OptionalNotes());
2142 void warnReturnTypestateForUnconsumableType(SourceLocation Loc
,
2143 StringRef TypeName
) override
{
2144 PartialDiagnosticAt
Warning(Loc
, S
.PDiag(
2145 diag::warn_return_typestate_for_unconsumable_type
) << TypeName
);
2147 Warnings
.emplace_back(std::move(Warning
), OptionalNotes());
2150 void warnReturnTypestateMismatch(SourceLocation Loc
, StringRef ExpectedState
,
2151 StringRef ObservedState
) override
{
2153 PartialDiagnosticAt
Warning(Loc
, S
.PDiag(
2154 diag::warn_return_typestate_mismatch
) << ExpectedState
<< ObservedState
);
2156 Warnings
.emplace_back(std::move(Warning
), OptionalNotes());
2159 void warnUseOfTempInInvalidState(StringRef MethodName
, StringRef State
,
2160 SourceLocation Loc
) override
{
2162 PartialDiagnosticAt
Warning(Loc
, S
.PDiag(
2163 diag::warn_use_of_temp_in_invalid_state
) << MethodName
<< State
);
2165 Warnings
.emplace_back(std::move(Warning
), OptionalNotes());
2168 void warnUseInInvalidState(StringRef MethodName
, StringRef VariableName
,
2169 StringRef State
, SourceLocation Loc
) override
{
2171 PartialDiagnosticAt
Warning(Loc
, S
.PDiag(diag::warn_use_in_invalid_state
) <<
2172 MethodName
<< VariableName
<< State
);
2174 Warnings
.emplace_back(std::move(Warning
), OptionalNotes());
2177 } // anonymous namespace
2178 } // namespace consumed
2179 } // namespace clang
2181 //===----------------------------------------------------------------------===//
2182 // Unsafe buffer usage analysis.
2183 //===----------------------------------------------------------------------===//
2186 class UnsafeBufferUsageReporter
: public UnsafeBufferUsageHandler
{
2188 bool SuggestSuggestions
; // Recommend -fsafe-buffer-usage-suggestions?
2190 // Lists as a string the names of variables in `VarGroupForVD` except for `VD`
2192 std::string
listVariableGroupAsString(
2193 const VarDecl
*VD
, const ArrayRef
<const VarDecl
*> &VarGroupForVD
) const {
2194 if (VarGroupForVD
.size() <= 1)
2197 std::vector
<StringRef
> VarNames
;
2198 auto PutInQuotes
= [](StringRef S
) -> std::string
{
2199 return "'" + S
.str() + "'";
2202 for (auto *V
: VarGroupForVD
) {
2205 VarNames
.push_back(V
->getName());
2207 if (VarNames
.size() == 1) {
2208 return PutInQuotes(VarNames
[0]);
2210 if (VarNames
.size() == 2) {
2211 return PutInQuotes(VarNames
[0]) + " and " + PutInQuotes(VarNames
[1]);
2213 assert(VarGroupForVD
.size() > 3);
2214 const unsigned N
= VarNames
.size() -
2215 2; // need to print the last two names as "..., X, and Y"
2216 std::string AllVars
= "";
2218 for (unsigned I
= 0; I
< N
; ++I
)
2219 AllVars
.append(PutInQuotes(VarNames
[I
]) + ", ");
2220 AllVars
.append(PutInQuotes(VarNames
[N
]) + ", and " +
2221 PutInQuotes(VarNames
[N
+ 1]));
2226 UnsafeBufferUsageReporter(Sema
&S
, bool SuggestSuggestions
)
2227 : S(S
), SuggestSuggestions(SuggestSuggestions
) {}
2229 void handleUnsafeOperation(const Stmt
*Operation
,
2230 bool IsRelatedToDecl
) override
{
2233 unsigned MsgParam
= 0;
2234 if (const auto *ASE
= dyn_cast
<ArraySubscriptExpr
>(Operation
)) {
2235 Loc
= ASE
->getBase()->getExprLoc();
2236 Range
= ASE
->getBase()->getSourceRange();
2238 } else if (const auto *BO
= dyn_cast
<BinaryOperator
>(Operation
)) {
2239 BinaryOperator::Opcode Op
= BO
->getOpcode();
2240 if (Op
== BO_Add
|| Op
== BO_AddAssign
|| Op
== BO_Sub
||
2241 Op
== BO_SubAssign
) {
2242 if (BO
->getRHS()->getType()->isIntegerType()) {
2243 Loc
= BO
->getLHS()->getExprLoc();
2244 Range
= BO
->getLHS()->getSourceRange();
2246 Loc
= BO
->getRHS()->getExprLoc();
2247 Range
= BO
->getRHS()->getSourceRange();
2251 } else if (const auto *UO
= dyn_cast
<UnaryOperator
>(Operation
)) {
2252 UnaryOperator::Opcode Op
= UO
->getOpcode();
2253 if (Op
== UO_PreInc
|| Op
== UO_PreDec
|| Op
== UO_PostInc
||
2255 Loc
= UO
->getSubExpr()->getExprLoc();
2256 Range
= UO
->getSubExpr()->getSourceRange();
2260 if (isa
<CallExpr
>(Operation
)) {
2261 // note_unsafe_buffer_operation doesn't have this mode yet.
2262 assert(!IsRelatedToDecl
&& "Not implemented yet!");
2265 Loc
= Operation
->getBeginLoc();
2266 Range
= Operation
->getSourceRange();
2268 if (IsRelatedToDecl
) {
2269 assert(!SuggestSuggestions
&&
2270 "Variables blamed for unsafe buffer usage without suggestions!");
2271 S
.Diag(Loc
, diag::note_unsafe_buffer_operation
) << MsgParam
<< Range
;
2273 S
.Diag(Loc
, diag::warn_unsafe_buffer_operation
) << MsgParam
<< Range
;
2274 if (SuggestSuggestions
) {
2275 S
.Diag(Loc
, diag::note_safe_buffer_usage_suggestions_disabled
);
2280 void handleUnsafeVariableGroup(const VarDecl
*Variable
,
2281 const VariableGroupsManager
&VarGrpMgr
,
2282 FixItList
&&Fixes
, const Decl
*D
) override
{
2283 assert(!SuggestSuggestions
&&
2284 "Unsafe buffer usage fixits displayed without suggestions!");
2285 S
.Diag(Variable
->getLocation(), diag::warn_unsafe_buffer_variable
)
2286 << Variable
<< (Variable
->getType()->isPointerType() ? 0 : 1)
2287 << Variable
->getSourceRange();
2288 if (!Fixes
.empty()) {
2289 assert(isa
<NamedDecl
>(D
) &&
2290 "Fix-its are generated only for `NamedDecl`s");
2291 const NamedDecl
*ND
= cast
<NamedDecl
>(D
);
2292 bool BriefMsg
= false;
2293 // If the variable group involves parameters, the diagnostic message will
2294 // NOT explain how the variables are grouped as the reason is non-trivial
2295 // and irrelavant to users' experience:
2296 const auto VarGroupForVD
= VarGrpMgr
.getGroupOfVar(Variable
, &BriefMsg
);
2297 unsigned FixItStrategy
= 0; // For now we only have 'std::span' strategy
2299 S
.Diag(Variable
->getLocation(),
2300 BriefMsg
? diag::note_unsafe_buffer_variable_fixit_together
2301 : diag::note_unsafe_buffer_variable_fixit_group
);
2303 FD
<< Variable
<< FixItStrategy
;
2304 FD
<< listVariableGroupAsString(Variable
, VarGroupForVD
)
2305 << (VarGroupForVD
.size() > 1) << ND
;
2306 for (const auto &F
: Fixes
) {
2312 if (areDebugNotesRequested())
2313 for (const DebugNote
&Note
: DebugNotesByVar
[Variable
])
2314 S
.Diag(Note
.first
, diag::note_safe_buffer_debug_mode
) << Note
.second
;
2318 bool isSafeBufferOptOut(const SourceLocation
&Loc
) const override
{
2319 return S
.PP
.isSafeBufferOptOut(S
.getSourceManager(), Loc
);
2322 // Returns the text representation of clang::unsafe_buffer_usage attribute.
2323 // `WSSuffix` holds customized "white-space"s, e.g., newline or whilespace
2326 getUnsafeBufferUsageAttributeTextAt(SourceLocation Loc
,
2327 StringRef WSSuffix
= "") const override
{
2328 Preprocessor
&PP
= S
.getPreprocessor();
2329 TokenValue ClangUnsafeBufferUsageTokens
[] = {
2332 PP
.getIdentifierInfo("clang"),
2334 PP
.getIdentifierInfo("unsafe_buffer_usage"),
2338 StringRef MacroName
;
2340 // The returned macro (it returns) is guaranteed not to be function-like:
2341 MacroName
= PP
.getLastMacroWithSpelling(Loc
, ClangUnsafeBufferUsageTokens
);
2342 if (MacroName
.empty())
2343 MacroName
= "[[clang::unsafe_buffer_usage]]";
2344 return MacroName
.str() + WSSuffix
.str();
2349 //===----------------------------------------------------------------------===//
2350 // AnalysisBasedWarnings - Worker object used by Sema to execute analysis-based
2351 // warnings on a function, method, or block.
2352 //===----------------------------------------------------------------------===//
2354 sema::AnalysisBasedWarnings::Policy::Policy() {
2355 enableCheckFallThrough
= 1;
2356 enableCheckUnreachable
= 0;
2357 enableThreadSafetyAnalysis
= 0;
2358 enableConsumedAnalysis
= 0;
2361 /// InterProceduralData aims to be a storage of whatever data should be passed
2362 /// between analyses of different functions.
2364 /// At the moment, its primary goal is to make the information gathered during
2365 /// the analysis of the blocks available during the analysis of the enclosing
2366 /// function. This is important due to the fact that blocks are analyzed before
2367 /// the enclosed function is even parsed fully, so it is not viable to access
2368 /// anything in the outer scope while analyzing the block. On the other hand,
2369 /// re-building CFG for blocks and re-analyzing them when we do have all the
2370 /// information (i.e. during the analysis of the enclosing function) seems to be
2372 class sema::AnalysisBasedWarnings::InterProceduralData
{
2374 // It is important to analyze blocks within functions because it's a very
2375 // common pattern to capture completion handler parameters by blocks.
2376 CalledOnceInterProceduralData CalledOnceData
;
2379 static unsigned isEnabled(DiagnosticsEngine
&D
, unsigned diag
) {
2380 return (unsigned)!D
.isIgnored(diag
, SourceLocation());
2383 sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema
&s
)
2384 : S(s
), IPData(std::make_unique
<InterProceduralData
>()),
2385 NumFunctionsAnalyzed(0), NumFunctionsWithBadCFGs(0), NumCFGBlocks(0),
2386 MaxCFGBlocksPerFunction(0), NumUninitAnalysisFunctions(0),
2387 NumUninitAnalysisVariables(0), MaxUninitAnalysisVariablesPerFunction(0),
2388 NumUninitAnalysisBlockVisits(0),
2389 MaxUninitAnalysisBlockVisitsPerFunction(0) {
2391 using namespace diag
;
2392 DiagnosticsEngine
&D
= S
.getDiagnostics();
2394 DefaultPolicy
.enableCheckUnreachable
=
2395 isEnabled(D
, warn_unreachable
) || isEnabled(D
, warn_unreachable_break
) ||
2396 isEnabled(D
, warn_unreachable_return
) ||
2397 isEnabled(D
, warn_unreachable_loop_increment
);
2399 DefaultPolicy
.enableThreadSafetyAnalysis
= isEnabled(D
, warn_double_lock
);
2401 DefaultPolicy
.enableConsumedAnalysis
=
2402 isEnabled(D
, warn_use_in_invalid_state
);
2405 // We need this here for unique_ptr with forward declared class.
2406 sema::AnalysisBasedWarnings::~AnalysisBasedWarnings() = default;
2408 static void flushDiagnostics(Sema
&S
, const sema::FunctionScopeInfo
*fscope
) {
2409 for (const auto &D
: fscope
->PossiblyUnreachableDiags
)
2410 S
.Diag(D
.Loc
, D
.PD
);
2413 // An AST Visitor that calls a callback function on each callable DEFINITION
2414 // that is NOT in a dependent context:
2415 class CallableVisitor
: public RecursiveASTVisitor
<CallableVisitor
> {
2417 llvm::function_ref
<void(const Decl
*)> Callback
;
2420 CallableVisitor(llvm::function_ref
<void(const Decl
*)> Callback
)
2421 : Callback(Callback
) {}
2423 bool VisitFunctionDecl(FunctionDecl
*Node
) {
2424 if (cast
<DeclContext
>(Node
)->isDependentContext())
2425 return true; // Not to analyze dependent decl
2426 // `FunctionDecl->hasBody()` returns true if the function has a body
2427 // somewhere defined. But we want to know if this `Node` has a body
2428 // child. So we use `doesThisDeclarationHaveABody`:
2429 if (Node
->doesThisDeclarationHaveABody())
2434 bool VisitBlockDecl(BlockDecl
*Node
) {
2435 if (cast
<DeclContext
>(Node
)->isDependentContext())
2436 return true; // Not to analyze dependent decl
2441 bool VisitObjCMethodDecl(ObjCMethodDecl
*Node
) {
2442 if (cast
<DeclContext
>(Node
)->isDependentContext())
2443 return true; // Not to analyze dependent decl
2444 if (Node
->hasBody())
2449 bool VisitLambdaExpr(LambdaExpr
*Node
) {
2450 return VisitFunctionDecl(Node
->getCallOperator());
2453 bool shouldVisitTemplateInstantiations() const { return true; }
2454 bool shouldVisitImplicitCode() const { return false; }
2457 void clang::sema::AnalysisBasedWarnings::IssueWarnings(
2458 TranslationUnitDecl
*TU
) {
2460 return; // This is unexpected, give up quietly.
2462 DiagnosticsEngine
&Diags
= S
.getDiagnostics();
2464 if (S
.hasUncompilableErrorOccurred() || Diags
.getIgnoreAllWarnings())
2465 // exit if having uncompilable errors or ignoring all warnings:
2468 DiagnosticOptions
&DiagOpts
= Diags
.getDiagnosticOptions();
2470 // UnsafeBufferUsage analysis settings.
2471 bool UnsafeBufferUsageCanEmitSuggestions
= S
.getLangOpts().CPlusPlus20
;
2472 bool UnsafeBufferUsageShouldEmitSuggestions
= // Should != Can.
2473 UnsafeBufferUsageCanEmitSuggestions
&&
2474 DiagOpts
.ShowSafeBufferUsageSuggestions
;
2475 bool UnsafeBufferUsageShouldSuggestSuggestions
=
2476 UnsafeBufferUsageCanEmitSuggestions
&&
2477 !DiagOpts
.ShowSafeBufferUsageSuggestions
;
2478 UnsafeBufferUsageReporter
R(S
, UnsafeBufferUsageShouldSuggestSuggestions
);
2480 // The Callback function that performs analyses:
2481 auto CallAnalyzers
= [&](const Decl
*Node
) -> void {
2482 // Perform unsafe buffer usage analysis:
2483 if (!Diags
.isIgnored(diag::warn_unsafe_buffer_operation
,
2484 Node
->getBeginLoc()) ||
2485 !Diags
.isIgnored(diag::warn_unsafe_buffer_variable
,
2486 Node
->getBeginLoc())) {
2487 clang::checkUnsafeBufferUsage(Node
, R
,
2488 UnsafeBufferUsageShouldEmitSuggestions
);
2491 // More analysis ...
2493 // Emit per-function analysis-based warnings that require the whole-TU
2494 // reasoning. Check if any of them is enabled at all before scanning the AST:
2495 if (!Diags
.isIgnored(diag::warn_unsafe_buffer_operation
, SourceLocation()) ||
2496 !Diags
.isIgnored(diag::warn_unsafe_buffer_variable
, SourceLocation())) {
2497 CallableVisitor(CallAnalyzers
).TraverseTranslationUnitDecl(TU
);
2501 void clang::sema::AnalysisBasedWarnings::IssueWarnings(
2502 sema::AnalysisBasedWarnings::Policy P
, sema::FunctionScopeInfo
*fscope
,
2503 const Decl
*D
, QualType BlockType
) {
2505 // We avoid doing analysis-based warnings when there are errors for
2507 // (1) The CFGs often can't be constructed (if the body is invalid), so
2508 // don't bother trying.
2509 // (2) The code already has problems; running the analysis just takes more
2511 DiagnosticsEngine
&Diags
= S
.getDiagnostics();
2513 // Do not do any analysis if we are going to just ignore them.
2514 if (Diags
.getIgnoreAllWarnings() ||
2515 (Diags
.getSuppressSystemWarnings() &&
2516 S
.SourceMgr
.isInSystemHeader(D
->getLocation())))
2519 // For code in dependent contexts, we'll do this at instantiation time.
2520 if (cast
<DeclContext
>(D
)->isDependentContext())
2523 if (S
.hasUncompilableErrorOccurred()) {
2524 // Flush out any possibly unreachable diagnostics.
2525 flushDiagnostics(S
, fscope
);
2529 const Stmt
*Body
= D
->getBody();
2532 // Construct the analysis context with the specified CFG build options.
2533 AnalysisDeclContext
AC(/* AnalysisDeclContextManager */ nullptr, D
);
2535 // Don't generate EH edges for CallExprs as we'd like to avoid the n^2
2536 // explosion for destructors that can result and the compile time hit.
2537 AC
.getCFGBuildOptions().PruneTriviallyFalseEdges
= true;
2538 AC
.getCFGBuildOptions().AddEHEdges
= false;
2539 AC
.getCFGBuildOptions().AddInitializers
= true;
2540 AC
.getCFGBuildOptions().AddImplicitDtors
= true;
2541 AC
.getCFGBuildOptions().AddTemporaryDtors
= true;
2542 AC
.getCFGBuildOptions().AddCXXNewAllocator
= false;
2543 AC
.getCFGBuildOptions().AddCXXDefaultInitExprInCtors
= true;
2545 // Force that certain expressions appear as CFGElements in the CFG. This
2546 // is used to speed up various analyses.
2547 // FIXME: This isn't the right factoring. This is here for initial
2548 // prototyping, but we need a way for analyses to say what expressions they
2549 // expect to always be CFGElements and then fill in the BuildOptions
2550 // appropriately. This is essentially a layering violation.
2551 if (P
.enableCheckUnreachable
|| P
.enableThreadSafetyAnalysis
||
2552 P
.enableConsumedAnalysis
) {
2553 // Unreachable code analysis and thread safety require a linearized CFG.
2554 AC
.getCFGBuildOptions().setAllAlwaysAdd();
2557 AC
.getCFGBuildOptions()
2558 .setAlwaysAdd(Stmt::BinaryOperatorClass
)
2559 .setAlwaysAdd(Stmt::CompoundAssignOperatorClass
)
2560 .setAlwaysAdd(Stmt::BlockExprClass
)
2561 .setAlwaysAdd(Stmt::CStyleCastExprClass
)
2562 .setAlwaysAdd(Stmt::DeclRefExprClass
)
2563 .setAlwaysAdd(Stmt::ImplicitCastExprClass
)
2564 .setAlwaysAdd(Stmt::UnaryOperatorClass
);
2567 // Install the logical handler.
2568 std::optional
<LogicalErrorHandler
> LEH
;
2569 if (LogicalErrorHandler::hasActiveDiagnostics(Diags
, D
->getBeginLoc())) {
2571 AC
.getCFGBuildOptions().Observer
= &*LEH
;
2574 // Emit delayed diagnostics.
2575 if (!fscope
->PossiblyUnreachableDiags
.empty()) {
2576 bool analyzed
= false;
2578 // Register the expressions with the CFGBuilder.
2579 for (const auto &D
: fscope
->PossiblyUnreachableDiags
) {
2580 for (const Stmt
*S
: D
.Stmts
)
2581 AC
.registerForcedBlockExpression(S
);
2586 for (const auto &D
: fscope
->PossiblyUnreachableDiags
) {
2587 bool AllReachable
= true;
2588 for (const Stmt
*S
: D
.Stmts
) {
2589 const CFGBlock
*block
= AC
.getBlockForRegisteredExpression(S
);
2590 CFGReverseBlockReachabilityAnalysis
*cra
=
2591 AC
.getCFGReachablityAnalysis();
2592 // FIXME: We should be able to assert that block is non-null, but
2593 // the CFG analysis can skip potentially-evaluated expressions in
2594 // edge cases; see test/Sema/vla-2.c.
2596 // Can this block be reached from the entrance?
2597 if (!cra
->isReachable(&AC
.getCFG()->getEntry(), block
)) {
2598 AllReachable
= false;
2602 // If we cannot map to a basic block, assume the statement is
2607 S
.Diag(D
.Loc
, D
.PD
);
2612 flushDiagnostics(S
, fscope
);
2615 // Warning: check missing 'return'
2616 if (P
.enableCheckFallThrough
) {
2617 const CheckFallThroughDiagnostics
&CD
=
2619 ? CheckFallThroughDiagnostics::MakeForBlock()
2620 : (isa
<CXXMethodDecl
>(D
) &&
2621 cast
<CXXMethodDecl
>(D
)->getOverloadedOperator() == OO_Call
&&
2622 cast
<CXXMethodDecl
>(D
)->getParent()->isLambda())
2623 ? CheckFallThroughDiagnostics::MakeForLambda()
2624 : (fscope
->isCoroutine()
2625 ? CheckFallThroughDiagnostics::MakeForCoroutine(D
)
2626 : CheckFallThroughDiagnostics::MakeForFunction(D
)));
2627 CheckFallThroughForBody(S
, D
, Body
, BlockType
, CD
, AC
, fscope
);
2630 // Warning: check for unreachable code
2631 if (P
.enableCheckUnreachable
) {
2632 // Only check for unreachable code on non-template instantiations.
2633 // Different template instantiations can effectively change the control-flow
2634 // and it is very difficult to prove that a snippet of code in a template
2635 // is unreachable for all instantiations.
2636 bool isTemplateInstantiation
= false;
2637 if (const FunctionDecl
*Function
= dyn_cast
<FunctionDecl
>(D
))
2638 isTemplateInstantiation
= Function
->isTemplateInstantiation();
2639 if (!isTemplateInstantiation
)
2640 CheckUnreachable(S
, AC
);
2643 // Check for thread safety violations
2644 if (P
.enableThreadSafetyAnalysis
) {
2645 SourceLocation FL
= AC
.getDecl()->getLocation();
2646 SourceLocation FEL
= AC
.getDecl()->getEndLoc();
2647 threadSafety::ThreadSafetyReporter
Reporter(S
, FL
, FEL
);
2648 if (!Diags
.isIgnored(diag::warn_thread_safety_beta
, D
->getBeginLoc()))
2649 Reporter
.setIssueBetaWarnings(true);
2650 if (!Diags
.isIgnored(diag::warn_thread_safety_verbose
, D
->getBeginLoc()))
2651 Reporter
.setVerbose(true);
2653 threadSafety::runThreadSafetyAnalysis(AC
, Reporter
,
2654 &S
.ThreadSafetyDeclCache
);
2655 Reporter
.emitDiagnostics();
2658 // Check for violations of consumed properties.
2659 if (P
.enableConsumedAnalysis
) {
2660 consumed::ConsumedWarningsHandler
WarningHandler(S
);
2661 consumed::ConsumedAnalyzer
Analyzer(WarningHandler
);
2665 if (!Diags
.isIgnored(diag::warn_uninit_var
, D
->getBeginLoc()) ||
2666 !Diags
.isIgnored(diag::warn_sometimes_uninit_var
, D
->getBeginLoc()) ||
2667 !Diags
.isIgnored(diag::warn_maybe_uninit_var
, D
->getBeginLoc()) ||
2668 !Diags
.isIgnored(diag::warn_uninit_const_reference
, D
->getBeginLoc())) {
2669 if (CFG
*cfg
= AC
.getCFG()) {
2670 UninitValsDiagReporter
reporter(S
);
2671 UninitVariablesAnalysisStats stats
;
2672 std::memset(&stats
, 0, sizeof(UninitVariablesAnalysisStats
));
2673 runUninitializedVariablesAnalysis(*cast
<DeclContext
>(D
), *cfg
, AC
,
2676 if (S
.CollectStats
&& stats
.NumVariablesAnalyzed
> 0) {
2677 ++NumUninitAnalysisFunctions
;
2678 NumUninitAnalysisVariables
+= stats
.NumVariablesAnalyzed
;
2679 NumUninitAnalysisBlockVisits
+= stats
.NumBlockVisits
;
2680 MaxUninitAnalysisVariablesPerFunction
=
2681 std::max(MaxUninitAnalysisVariablesPerFunction
,
2682 stats
.NumVariablesAnalyzed
);
2683 MaxUninitAnalysisBlockVisitsPerFunction
=
2684 std::max(MaxUninitAnalysisBlockVisitsPerFunction
,
2685 stats
.NumBlockVisits
);
2690 // Check for violations of "called once" parameter properties.
2691 if (S
.getLangOpts().ObjC
&& !S
.getLangOpts().CPlusPlus
&&
2692 shouldAnalyzeCalledOnceParameters(Diags
, D
->getBeginLoc())) {
2694 CalledOnceCheckReporter
Reporter(S
, IPData
->CalledOnceData
);
2695 checkCalledOnceParameters(
2697 shouldAnalyzeCalledOnceConventions(Diags
, D
->getBeginLoc()));
2701 bool FallThroughDiagFull
=
2702 !Diags
.isIgnored(diag::warn_unannotated_fallthrough
, D
->getBeginLoc());
2703 bool FallThroughDiagPerFunction
= !Diags
.isIgnored(
2704 diag::warn_unannotated_fallthrough_per_function
, D
->getBeginLoc());
2705 if (FallThroughDiagFull
|| FallThroughDiagPerFunction
||
2706 fscope
->HasFallthroughStmt
) {
2707 DiagnoseSwitchLabelsFallthrough(S
, AC
, !FallThroughDiagFull
);
2710 if (S
.getLangOpts().ObjCWeak
&&
2711 !Diags
.isIgnored(diag::warn_arc_repeated_use_of_weak
, D
->getBeginLoc()))
2712 diagnoseRepeatedUseOfWeak(S
, fscope
, D
, AC
.getParentMap());
2715 // Check for infinite self-recursion in functions
2716 if (!Diags
.isIgnored(diag::warn_infinite_recursive_function
,
2717 D
->getBeginLoc())) {
2718 if (const FunctionDecl
*FD
= dyn_cast
<FunctionDecl
>(D
)) {
2719 checkRecursiveFunction(S
, FD
, Body
, AC
);
2723 // Check for throw out of non-throwing function.
2724 if (!Diags
.isIgnored(diag::warn_throw_in_noexcept_func
, D
->getBeginLoc()))
2725 if (const FunctionDecl
*FD
= dyn_cast
<FunctionDecl
>(D
))
2726 if (S
.getLangOpts().CPlusPlus
&& !fscope
->isCoroutine() && isNoexcept(FD
))
2727 checkThrowInNonThrowingFunc(S
, FD
, AC
);
2729 // If none of the previous checks caused a CFG build, trigger one here
2730 // for the logical error handler.
2731 if (LogicalErrorHandler::hasActiveDiagnostics(Diags
, D
->getBeginLoc())) {
2735 // Collect statistics about the CFG if it was built.
2736 if (S
.CollectStats
&& AC
.isCFGBuilt()) {
2737 ++NumFunctionsAnalyzed
;
2738 if (CFG
*cfg
= AC
.getCFG()) {
2739 // If we successfully built a CFG for this context, record some more
2740 // detail information about it.
2741 NumCFGBlocks
+= cfg
->getNumBlockIDs();
2742 MaxCFGBlocksPerFunction
= std::max(MaxCFGBlocksPerFunction
,
2743 cfg
->getNumBlockIDs());
2745 ++NumFunctionsWithBadCFGs
;
2750 void clang::sema::AnalysisBasedWarnings::PrintStats() const {
2751 llvm::errs() << "\n*** Analysis Based Warnings Stats:\n";
2753 unsigned NumCFGsBuilt
= NumFunctionsAnalyzed
- NumFunctionsWithBadCFGs
;
2754 unsigned AvgCFGBlocksPerFunction
=
2755 !NumCFGsBuilt
? 0 : NumCFGBlocks
/NumCFGsBuilt
;
2756 llvm::errs() << NumFunctionsAnalyzed
<< " functions analyzed ("
2757 << NumFunctionsWithBadCFGs
<< " w/o CFGs).\n"
2758 << " " << NumCFGBlocks
<< " CFG blocks built.\n"
2759 << " " << AvgCFGBlocksPerFunction
2760 << " average CFG blocks per function.\n"
2761 << " " << MaxCFGBlocksPerFunction
2762 << " max CFG blocks per function.\n";
2764 unsigned AvgUninitVariablesPerFunction
= !NumUninitAnalysisFunctions
? 0
2765 : NumUninitAnalysisVariables
/NumUninitAnalysisFunctions
;
2766 unsigned AvgUninitBlockVisitsPerFunction
= !NumUninitAnalysisFunctions
? 0
2767 : NumUninitAnalysisBlockVisits
/NumUninitAnalysisFunctions
;
2768 llvm::errs() << NumUninitAnalysisFunctions
2769 << " functions analyzed for uninitialiazed variables\n"
2770 << " " << NumUninitAnalysisVariables
<< " variables analyzed.\n"
2771 << " " << AvgUninitVariablesPerFunction
2772 << " average variables per function.\n"
2773 << " " << MaxUninitAnalysisVariablesPerFunction
2774 << " max variables per function.\n"
2775 << " " << NumUninitAnalysisBlockVisits
<< " block visits.\n"
2776 << " " << AvgUninitBlockVisitsPerFunction
2777 << " average block visits per function.\n"
2778 << " " << MaxUninitAnalysisBlockVisitsPerFunction
2779 << " max block visits per function.\n";